/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_timer.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/**
 * Special DTrace provider for the linuxulator.
 *
 * In this file we define the provider for the entire linuxulator. All
 * modules (= files of the linuxulator) use it.
 *
 * We define a different name depending on the emulated bitsize, see
 * ../../<ARCH>/linux{,32}/linux.h, e.g.:
 *	native bitsize		= linuxulator
 *	amd64, 32bit emulation	= linuxulator32
 */
LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE);

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalhigh;
	l_ulong		freehigh;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};

static int	linux_utimensat_nsec_valid(l_long);

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
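	/*
	 * Illustrative example: Linux expects the load averages as
	 * fixed-point numbers with 16 fractional bits, so a 1-minute
	 * load of 1.50 is reported as 1.50 * 65536 = 98304.
	 */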
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = (u_long)vm_free_count() * PAGE_SIZE;

	/*
	 * sharedram counts pages allocated to named, swap-backed objects such
	 * as shared memory segments and tmpfs files. There is no cheap way to
	 * compute this, so just leave the field unpopulated. Linux itself only
	 * started setting this field in the 3.x timeframe.
	 */
	sysinfo.sharedram = 0;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/*
	 * Platforms supported by the emulation layer do not have a notion of
	 * high memory.
	 */
	sysinfo.totalhigh = 0;
	sysinfo.freehigh = 0;

	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

	secs = args->secs;
	/*
	 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2
	 * to match kern_setitimer()'s limit and so avoid an error from it.
	 *
	 * XXX. Linux limits secs to INT_MAX on 32-bit platforms and does
	 * not limit it at all on 64-bit ones.
	 */
	if (secs > INT32_MAX / 2)
		secs = INT32_MAX / 2;

	it.it_value.tv_sec = secs;
	it.it_value.tv_usec = 0;
	timevalclear(&it.it_interval);
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	KASSERT(error == 0, ("kern_setitimer returns %d", error));

	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
	    old_it.it_value.tv_usec >= 500000)
		old_it.it_value.tv_sec++;
	td->td_retval[0] = old_it.it_value.tv_sec;
	return (0);
}
#endif

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	uintptr_t new, old;

	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (uintptr_t)args->dsend;
	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
		td->td_retval[0] = (register_t)new;
	else
		td->td_retval[0] = (register_t)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	if (!LUSECONVPATH(td)) {
		NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_USERSPACE, args->library, td);
		error = namei(&ni);
	} else {
		LCONVPATHEXIST(td, args->library, &library);
		NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_SYSSPACE, library, td);
		error = namei(&ni);
		LFREEPATH(library);
	}
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull the executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;
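	/*
	 * Linux a.out binaries carry 0x64 (ASCII 'd') in the top byte of
	 * a_magic; the low 16 bits hold the conventional a.out magic
	 * (ZMAGIC or QMAGIC), which is checked further below.
	 */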
	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:			/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:			/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed
	 */
	locked = false;
	VOP_UNLOCK(vp);

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		textset = false;
	}

	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	if (opened) {
		if (locked)
			VOP_UNLOCK(vp);
		locked = false;
		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
	}
	if (textset) {
		if (!locked) {
			locked = true;
			VOP_LOCK(vp, LK_SHARED | LK_RETRY);
		}
		VOP_UNSET_TEXT_CHECKED(vp);
	}
	if (locked)
		VOP_UNLOCK(vp);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;
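	/*
	 * Linux select(2) writes the time remaining back to *timeout on
	 * return, while POSIX leaves the contents unspecified; the tv0/tv1
	 * bookkeeping above and below emulates that behaviour.
	 */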
	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
	return (error);
}
#endif

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	uintptr_t addr;
	size_t len;
	int error = 0;

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		addr = args->addr + args->new_len;
		len = args->old_len - args->new_len;
		error = kern_munmap(td, addr, len);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{

	return (kern_msync(td, args->addr, args->len,
	    args->fl & ~LINUX_MS_SYNC));
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}
#endif

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use a hard-coded CLK_TCK value.
 * Since 2.2.1 glibc uses the value exported by the kernel via the
 * AT_CLKTCK auxiliary vector entry.
 */
#define CLK_TCK 100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))
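/*
 * Worked example (illustrative): with stclohz = 128 and an rusage time of
 * 2.5 s, CONVNTCK gives 2 * 128 + 500000 / (1000000 / 128) = 256 + 64 =
 * 320 ticks.
 */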
int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
#if defined(__amd64__)
	/*
	 * On amd64, Linux uname(2) needs to return "x86_64"
	 * for both 64-bit and 32-bit applications. On 32-bit,
	 * the string returned by getauxval(AT_PLATFORM) needs
	 * to remain "i686", though.
	 */
	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
#else
	strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME);
#endif

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;
	bool convpath;

	convpath = LUSECONVPATH(td);
	if (convpath)
		LCONVPATHEXIST(td, args->fname, &fname);

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			if (convpath)
				LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	if (!convpath) {
		error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
		    UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;
	bool convpath;

	convpath = LUSECONVPATH(td);
	if (convpath)
		LCONVPATHEXIST(td, args->fname, &fname);

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	if (!convpath) {
		error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
		    tvp, UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif

static int
linux_utimensat_nsec_valid(l_long nsec)
{

	if (nsec == LINUX_UTIME_OMIT || nsec == LINUX_UTIME_NOW)
		return (0);
	if (nsec >= 0 && nsec <= 999999999)
		return (0);
	return (1);
}
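/*
 * On Linux the UTIME_NOW / UTIME_OMIT markers are encoded in tv_nsec as
 * (1l << 30) - 1 and (1l << 30) - 2 respectively, well outside the valid
 * 0..999999999 range, which is why the validator above accepts them
 * explicitly.
 */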
int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp = NULL;
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW)
		return (EINVAL);

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 ||
		    linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0)
			return (EINVAL);

		times[0].tv_sec = l_times[0].tv_sec;
		switch (l_times[0].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[0].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[0].tv_nsec = UTIME_NOW;
			break;
		default:
			times[0].tv_nsec = l_times[0].tv_nsec;
		}

		times[1].tv_sec = l_times[1].tv_sec;
		switch (l_times[1].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[1].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[1].tv_nsec = UTIME_NOW;
			break;
		default:
			times[1].tv_nsec = l_times[1].tv_nsec;
			break;
		}
		timesp = times;

		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
		 */
		if (times[0].tv_nsec == UTIME_OMIT &&
		    times[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (args->pathname != NULL)
		LCONVPATHEXIST_AT(td, args->pathname, &path, dfd);
	else if (args->flags != 0)
		return (EINVAL);

	if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
		    UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;
	bool convpath;

	convpath = LUSECONVPATH(td);
	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	if (convpath)
		LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			if (convpath)
				LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	if (!convpath) {
		error = kern_utimesat(td, dfd, args->filename, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE,
		    tvp, UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif

static int
linux_common_wait(struct thread *td, int pid, int *statusp,
    int options, struct __wrusage *wrup)
{
	siginfo_t siginfo;
	idtype_t idtype;
	id_t id;
	int error, status, tmpstat;

	if (pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = (id_t)-pid;
	} else {
		idtype = P_PID;
		id = (id_t)pid;
	}

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;
	error = kern_wait6(td, idtype, id, &status, options, wrup, &siginfo);
	if (error)
		return (error);
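	/*
	 * Linux encodes the wait status like traditional wait(2): the
	 * termination signal lives in the low 7 bits (with 0x80 as the
	 * core-dump flag), the exit code or stop signal in bits 8-15,
	 * and 0xffff means the child was continued. The translation
	 * below only rewrites the signal numbers within that layout.
	 */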
	if (statusp) {
		tmpstat = status & 0xffff;
		if (WIFSIGNALED(tmpstat)) {
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		} else if (WIFSTOPPED(tmpstat)) {
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
#if defined(__amd64__) && !defined(COMPAT_LINUX32)
			if (WSTOPSIG(status) == SIGTRAP) {
				tmpstat = linux_ptrace_status(td,
				    siginfo.si_pid, tmpstat);
			}
#endif
		} else if (WIFCONTINUED(tmpstat)) {
			tmpstat = 0xffff;
		}
		error = copyout(&tmpstat, statusp, sizeof(int));
	}

	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	struct linux_wait4_args wait4_args;

	wait4_args.pid = args->pid;
	wait4_args.status = args->status;
	wait4_args.options = args->options;
	wait4_args.rusage = NULL;

	return (linux_wait4(td, &wait4_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options;
	struct __wrusage wru, *wrup;

	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	options = WEXITED;
	linux_to_bsd_waitopts(args->options, &options);

	if (args->rusage != NULL)
		wrup = &wru;
	else
		wrup = NULL;
	error = linux_common_wait(td, args->pid, args->status, options, wrup);
	if (error != 0)
		return (error);
	if (args->rusage != NULL)
		error = linux_copyout_rusage(&wru.wru_self, args->rusage);
	return (error);
}

int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	int status, options, sig;
	struct __wrusage wru;
	siginfo_t siginfo;
	l_siginfo_t lsi;
	idtype_t idtype;
	struct proc *p;
	int error;

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED))
		return (EINVAL);
	if (!(options & (WEXITED | WUNTRACED | WCONTINUED)))
		return (EINVAL);

	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	default:
		return (EINVAL);
	}

	error = kern_wait6(td, idtype, args->id, &status, options,
	    &wru, &siginfo);
	if (error != 0)
		return (error);
	if (args->rusage != NULL) {
		error = linux_copyout_rusage(&wru.wru_children,
		    args->rusage);
		if (error != 0)
			return (error);
	}
	if (args->info != NULL) {
		p = td->td_proc;
		bzero(&lsi, sizeof(lsi));
		if (td->td_retval[0] != 0) {
			sig = bsd_to_linux_signal(siginfo.si_signo);
			siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		}
		error = copyout(&lsi, args->info, sizeof(lsi));
	}
	td->td_retval[0] = 0;

	return (error);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;
	enum uio_seg seg;
	bool convpath;

	convpath = LUSECONVPATH(td);
	if (!convpath) {
		path = args->path;
		seg = UIO_USERSPACE;
	} else {
		LCONVPATHCREAT(td, args->path, &path);
		seg = UIO_SYSSPACE;
	}

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, path, seg,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, seg,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, path, seg,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (convpath)
		LFREEPATH(path);
	return (error);
}
#endif

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;
	enum uio_seg seg;
	bool convpath;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	convpath = LUSECONVPATH(td);
	if (!convpath) {
		path = __DECONST(char *, args->filename);
		seg = UIO_USERSPACE;
	} else {
		LCONVPATHCREAT_AT(td, args->filename, &path, dfd);
		seg = UIO_SYSSPACE;
	}

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, seg, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, seg, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, seg,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (convpath)
		LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
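/*
 * Note that personality(0xffffffff) is the documented Linux idiom for
 * querying the current persona without changing it, which is why that
 * value is excluded from the assignment below.
 */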
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
	struct linux_pemuldata *pem;
	struct proc *p = td->td_proc;
	uint32_t old;

	PROC_LOCK(p);
	pem = pem_find(p);
	old = pem->persona;
	if (args->per != 0xffffffff)
		pem->persona = args->per;
	PROC_UNLOCK(p);

	td->td_retval[0] = old;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define	B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{

	return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	crextend(newcred, ngrp + 1);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */
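	/*
	 * Resulting layout (illustrative):
	 *	cr_groups[0]		= egid (preserved)
	 *	cr_groups[1 .. ngrp]	= groups supplied by the caller
	 */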
	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	proc_set_cred(p, newcred);
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_LINUX);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_LINUX, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_LINUX);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

static bool
linux_get_dummy_limit(l_uint resource, struct rlimit *rlim)
{

	if (linux_dummy_rlimits == 0)
		return (false);

	switch (resource) {
	case LINUX_RLIMIT_LOCKS:
	case LINUX_RLIMIT_SIGPENDING:
	case LINUX_RLIMIT_MSGQUEUE:
	case LINUX_RLIMIT_RTTIME:
		rlim->rlim_cur = LINUX_RLIM_INFINITY;
		rlim->rlim_max = LINUX_RLIM_INFINITY;
		return (true);
	case LINUX_RLIMIT_NICE:
	case LINUX_RLIMIT_RTPRIO:
		rlim->rlim_cur = 0;
		rlim->rlim_max = 0;
		return (true);
	default:
		return (false);
	}
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	if (linux_map_sched_prio) {
		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0)
				return (EINVAL);

			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO)
				return (EINVAL);

			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

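	/*
	 * Example for the mapping above, assuming the usual constants
	 * (LINUX_MAX_RT_PRIO = 100, FreeBSD realtime range 0..31):
	 * Linux priority 1 maps to 0 and Linux priority 99 maps to 31.
	 */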
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = LINUX_MAX_RT_PRIO - 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
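	/*
	 * CAD_ON/CAD_OFF toggle ctrl-alt-del handling on Linux; FreeBSD
	 * has no equivalent, so only the reboot privilege is checked.
	 */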
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

	td->td_retval[0] = kern_getppid(td);
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{

	return (kern_getsid(td, args->pid));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	int error;

	error = kern_getpriority(td, args->which, args->who);
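	/*
	 * The Linux syscall reports the priority as 20 - nice (range
	 * 1..40) so the return value never collides with an errno;
	 * glibc converts it back. Hence the inversion here.
	 */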
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
	/* NOTREACHED */
}

#define _LINUX_CAPABILITY_VERSION_1	0x19980330
#define _LINUX_CAPABILITY_VERSION_2	0x20071026
#define _LINUX_CAPABILITY_VERSION_3	0x20080522

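/*
 * Version 1 uses a single 32-bit word per capability set; versions 2
 * and 3 widen the sets to 64 bits and therefore take two words, which
 * is what the u32s counts in capget/capset below reflect.
 */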
struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	int pdeath_signal;

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		pdeath_signal = linux_to_bsd_signal(args->arg2);
		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
		    &pdeath_signal));
	case LINUX_PR_GET_PDEATHSIG:
		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
		    &pdeath_signal);
		if (error != 0)
			return (error);
		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
		return (copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal)));
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure not to
		 * overflow the size a Linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	case LINUX_PR_GET_SECCOMP:
	case LINUX_PR_SET_SECCOMP:
		/*
		 * Same as returned by Linux without CONFIG_SECCOMP enabled.
		 */
		error = EINVAL;
		break;
	default:
		linux_msg(td, "unsupported prctl option %d", args->option);
		error = EINVAL;
		break;
	}

	return (error);
}

int
linux_sched_setparam(struct thread *td,
    struct linux_sched_setparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	error = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	if (linux_map_sched_prio) {
		error = kern_sched_getscheduler(td, tdt, &policy);
		if (error)
			goto out;

		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0) {
				error = EINVAL;
				goto out;
			}
			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO) {
				error = EINVAL;
				goto out;
			}
			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	error = kern_sched_setparam(td, tdt, &sched_param);
out:	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getparam(struct thread *td,
    struct linux_sched_getparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getparam(td, tdt, &sched_param);
	if (error) {
		PROC_UNLOCK(tdt->td_proc);
		return (error);
	}

	if (linux_map_sched_prio) {
		error = kern_sched_getscheduler(td, tdt, &policy);
		PROC_UNLOCK(tdt->td_proc);
		if (error)
			return (error);

		switch (policy) {
		case SCHED_OTHER:
			sched_param.sched_priority = 0;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			/*
			 * Map [0, RTP_PRIO_MAX - RTP_PRIO_MIN] to
			 * [1, LINUX_MAX_RT_PRIO - 1] (rounding up).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority *
			    (LINUX_MAX_RT_PRIO - 1) +
			    (RTP_PRIO_MAX - RTP_PRIO_MIN - 1)) /
			    (RTP_PRIO_MAX - RTP_PRIO_MIN) + 1;
			break;
		}
	} else
		PROC_UNLOCK(tdt->td_proc);

	error = copyout(&sched_param, uap->param, sizeof(sched_param));
	return (error);
}

/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct thread *tdt;

	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	PROC_UNLOCK(tdt->td_proc);

	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr);
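	/*
	 * On success Linux returns the number of bytes copied into the
	 * user mask; glibc's wrapper depends on this.
	 */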
	if (error == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 * Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct thread *tdt;

	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	PROC_UNLOCK(tdt->td_proc);

	return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr));
}

struct linux_rlimit64 {
	uint64_t	rlim_cur;
	uint64_t	rlim_max;
};

int
linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
{
	struct rlimit rlim, nrlim;
	struct linux_rlimit64 lrlim;
	struct proc *p;
	u_int which;
	int flags;
	int error;

	if (args->new == NULL && args->old != NULL) {
		if (linux_get_dummy_limit(args->resource, &rlim)) {
			lrlim.rlim_cur = rlim.rlim_cur;
			lrlim.rlim_max = rlim.rlim_max;
			return (copyout(&lrlim, args->old, sizeof(lrlim)));
		}
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	if (args->new != NULL) {
		/*
		 * Note: unlike on FreeBSD, where rlim is a signed 64-bit
		 * value, the Linux rlim is unsigned 64-bit. FreeBSD treats
		 * negative limits as INFINITY, so we do not even need a
		 * conversion here.
		 */
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	if (args->pid == 0) {
		p = td->td_proc;
		PHOLD(p);
	} else {
		error = pget(args->pid, flags, &p);
		if (error != 0)
			return (error);
	}
	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit_proc(p, which, &rlim);
		PROC_UNLOCK(p);
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

out:
	PRELE(p);
	return (error);
}

int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	struct l_timespec lts;
	struct timespec uts;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
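	/*
	 * Unlike native pselect(2), the sixth argument of pselect6 is a
	 * pointer to a { sigset pointer, size } pair (l_pselect6arg), so
	 * reaching the signal mask takes an extra copyin.
	 */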
	 */
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		TIMESPEC_TO_TIMEVAL(&utv, &uts);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much of the timeout is left by
			 * subtracting the elapsed time (the current time
			 * minus the time recorded before the call) from
			 * the user-supplied value.
			 */

			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

int
linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
{
	struct timespec ts0, ts1;
	struct l_timespec lts;
	struct timespec uts, *tsp;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	if (args->sset != NULL) {
		if (args->ssize != sizeof(l_ss))
			return (EINVAL);
		error = copyin(args->sset, &l_ss, sizeof(l_ss));
		if (error)
			return (error);
		linux_to_bsd_sigset(&l_ss, &ss);
		ssp = &ss;
	} else
		ssp = NULL;
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		nanotime(&ts0);
		tsp = &uts;
	} else
		tsp = NULL;

	error = kern_poll(td, args->fds, args->nfds, tsp, ssp);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0]) {
			nanotime(&ts1);
			timespecsub(&ts1, &ts0, &ts1);
			timespecsub(&uts, &ts1, &uts);
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else
			timespecclear(&uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec lts;
	struct thread *tdt;
	int error;

	/*
	 * According to the manual page, EINVAL must be returned when an
	 * invalid pid is specified.
	 */
	if (uap->pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
	PROC_UNLOCK(tdt->td_proc);
	if (error != 0)
		return (error);
	error = native_to_linux_timespec(&lts, &ts);
	if (error != 0)
		return (error);
	return (copyout(&lts, uap->interval, sizeof(lts)));
}

/*
 * When a Linux thread is the initial thread in its thread group, the
 * thread id is equal to the process id.  Glibc depends on this magic
 * (there is an assertion in pthread_getattr_np.c).
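 *
 * In Linux terms, for the initial thread of a process (an illustrative
 * userland check, not part of this file):
 *
 *	assert(syscall(SYS_gettid) == getpid());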
 */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		tdt = td;
		PROC_LOCK(tdt->td_proc);
	} else if (tid > PID_MAX)
		tdt = tdfind(tid, pid);
	else {
		/*
		 * This may be the initial thread, whose tid is equal to
		 * the pid.
		 */
		p = pfind(tid);
		if (p != NULL) {
			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
				/*
				 * p is not a Linuxulator process.
				 */
				PROC_UNLOCK(p);
				return (NULL);
			}
			FOREACH_THREAD_IN_PROC(p, tdt) {
				em = em_find(tdt);
				if (tid == em->em_tid)
					return (tdt);
			}
			PROC_UNLOCK(p);
		}
		return (NULL);
	}

	return (tdt);
}

void
linux_to_bsd_waitopts(int options, int *bsdopts)
{

	if (options & LINUX_WNOHANG)
		*bsdopts |= WNOHANG;
	if (options & LINUX_WUNTRACED)
		*bsdopts |= WUNTRACED;
	if (options & LINUX_WEXITED)
		*bsdopts |= WEXITED;
	if (options & LINUX_WCONTINUED)
		*bsdopts |= WCONTINUED;
	if (options & LINUX_WNOWAIT)
		*bsdopts |= WNOWAIT;

	if (options & __WCLONE)
		*bsdopts |= WLINUXCLONE;
}

int
linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
{
	struct uio uio;
	struct iovec iov;
	int error;

	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
		return (EINVAL);
	if (args->count > INT_MAX)
		args->count = INT_MAX;

	iov.iov_base = args->buf;
	iov.iov_len = args->count;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
	if (error == 0)
		td->td_retval[0] = args->count - uio.uio_resid;
	return (error);
}

int
linux_mincore(struct thread *td, struct linux_mincore_args *args)
{

	/* The address needs to be page-aligned. */
	if (args->start & PAGE_MASK)
		return (EINVAL);
	return (kern_mincore(td, args->start, args->len, args->vec));
}

#define	SYSLOG_TAG	"<6>"

int
linux_syslog(struct thread *td, struct linux_syslog_args *args)
{
	char buf[128], *src, *dst;
	u_int seq;
	int buflen, error;

	if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) {
		linux_msg(td, "syslog unsupported type 0x%x", args->type);
		return (EINVAL);
	}

	if (args->len < 6) {
		td->td_retval[0] = 0;
		return (0);
	}

	error = priv_check(td, PRIV_MSGBUF);
	if (error)
		return (error);

	mtx_lock(&msgbuf_lock);
	msgbuf_peekbytes(msgbufp, NULL, 0, &seq);
	mtx_unlock(&msgbuf_lock);

	dst = args->buf;
	error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG));
	/*
	 * The -1 below is to skip the trailing '\0'.
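	 * sizeof(SYSLOG_TAG) is 4: the three visible characters of "<6>"
	 * plus the string's NUL terminator.  The NUL is copied out above,
	 * but it is immediately overwritten by the next byte because dst
	 * advances by only sizeof(SYSLOG_TAG) - 1.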
	 */
	dst += sizeof(SYSLOG_TAG) - 1;

	while (error == 0) {
		mtx_lock(&msgbuf_lock);
		buflen = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq);
		mtx_unlock(&msgbuf_lock);

		if (buflen == 0)
			break;

		for (src = buf; src < buf + buflen && error == 0; src++) {
			if (*src == '\0')
				continue;

			if (dst >= args->buf + args->len)
				goto out;

			error = copyout(src, dst, 1);
			dst++;

			/*
			 * Insert a tag after each newline, unless the next
			 * character already starts one.  Do not look past
			 * the bytes actually peeked into buf.
			 */
			if (*src == '\n' && src + 1 < buf + buflen &&
			    *(src + 1) != '<' &&
			    dst + sizeof(SYSLOG_TAG) < args->buf + args->len) {
				error = copyout(&SYSLOG_TAG,
				    dst, sizeof(SYSLOG_TAG));
				dst += sizeof(SYSLOG_TAG) - 1;
			}
		}
	}
out:
	td->td_retval[0] = dst - args->buf;
	return (error);
}

int
linux_getcpu(struct thread *td, struct linux_getcpu_args *args)
{
	int cpu, error, node;

	cpu = td->td_oncpu;	/* Snapshot: must not change during copyout(9). */
	error = 0;
	node = cpuid_to_pcpu[cpu]->pc_domain;

	if (args->cpu != NULL)
		error = copyout(&cpu, args->cpu, sizeof(l_int));
	/* Do not let a successful second copyout hide an earlier error. */
	if (error == 0 && args->node != NULL)
		error = copyout(&node, args->node, sizeof(l_int));
	return (error);
}
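
/*
 * Illustrative Linux userland use of the getcpu() emulation above (a
 * sketch, not part of this file).  Older glibc versions provide no
 * wrapper, so the call typically goes through syscall(2):
 *
 *	unsigned int cpu, node;
 *
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("running on cpu %u, node %u\n", cpu, node);
 */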