/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_common.h>
#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_timer.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* Swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalhigh;
	l_ulong		freehigh;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};

static int	linux_utimensat_lts_to_ts(struct l_timespec *,
			struct timespec *);
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
static int	linux_utimensat_lts64_to_ts(struct l_timespec64 *,
			struct timespec *);
#endif
static int	linux_common_utimensat(struct thread *, int,
			const char *, struct timespec *, int);
static int	linux_common_pselect6(struct thread *, l_int,
			l_fd_set *, l_fd_set *, l_fd_set *,
			struct timespec *, l_uintptr_t *);
static int	linux_common_ppoll(struct thread *, struct pollfd *,
			uint32_t, struct timespec *, l_sigset_t *,
			l_size_t);
static int	linux_pollin(struct thread *, struct pollfd *,
			struct pollfd *, u_int);
static int	linux_pollout(struct thread *, struct pollfd *,
			struct pollfd *, u_int);
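
/*
 * Note on the loads[] conversion below: Linux reports load averages in
 * 16.16 fixed point (LINUX_SYSINFO_LOADS_SCALE), while the kernel keeps
 * them scaled by averunnable.fscale.  Illustrative example, assuming the
 * usual fscale of 2048: a 1-minute load of 0.5 is stored as ldavg == 1024
 * and converts to 1024 * 65536 / 2048 == 32768, i.e. 0.5 in Linux terms.
 */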

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages. */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = (u_long)vm_free_count() * PAGE_SIZE;

	/*
	 * sharedram counts pages allocated to named, swap-backed objects such
	 * as shared memory segments and tmpfs files.  There is no cheap way to
	 * compute this, so just leave the field unpopulated.  Linux itself only
	 * started setting this field in the 3.x timeframe.
	 */
	sysinfo.sharedram = 0;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/*
	 * Platforms supported by the emulation layer do not have a notion of
	 * high memory.
	 */
	sysinfo.totalhigh = 0;
	sysinfo.freehigh = 0;

	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error __diagused;

	secs = args->secs;
	/*
	 * Linux alarm() is always successful.  Limit secs to INT32_MAX / 2
	 * to match kern_setitimer()'s limit and avoid an error from it.
	 *
	 * XXX. Linux limits secs to INT_MAX on 32-bit platforms and does
	 * not limit it at all on 64-bit platforms.
	 */
	if (secs > INT32_MAX / 2)
		secs = INT32_MAX / 2;

	it.it_value.tv_sec = secs;
	it.it_value.tv_usec = 0;
	timevalclear(&it.it_interval);
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	KASSERT(error == 0, ("kern_setitimer returns %d", error));

	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
	    old_it.it_value.tv_usec >= 500000)
		old_it.it_value.tv_sec++;
	td->td_retval[0] = old_it.it_value.tv_sec;
	return (0);
}
#endif

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	uintptr_t new, old;

	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (uintptr_t)args->dsend;
	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
		td->td_retval[0] = (register_t)new;
	else
		td->td_retval[0] = (register_t)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	if (!LUSECONVPATH(td)) {
		NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_USERSPACE, args->library);
		error = namei(&ni);
	} else {
		LCONVPATHEXIST(args->library, &library);
		NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_SYSSPACE, library);
		error = namei(&ni);
		LFREEPATH(library);
	}
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE_PNBUF(&ni);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCESS is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull the executable header into exec_map. */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks.
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in the header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete.  it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed.
	 */
	locked = false;
	VOP_UNLOCK(vp);

	/*
	 * Check if file_offset is page aligned.  Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		textset = false;
	}

	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	if (opened) {
		if (locked)
			VOP_UNLOCK(vp);
		locked = false;
		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
	}
	if (textset) {
		if (!locked) {
			locked = true;
			VOP_LOCK(vp, LK_SHARED | LK_RETRY);
		}
		VOP_UNSET_TEXT_CHECKED(vp);
	}
	if (locked)
		VOP_UNLOCK(vp);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
	return (error);
}
#endif
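
/*
 * Only shrinking or exact-size remappings are supported below: growing a
 * mapping fails with ENOMEM, and LINUX_MREMAP_MAYMOVE/LINUX_MREMAP_FIXED
 * are accepted but have no effect beyond flag validation.  E.g. remapping
 * a two-page region down to one page simply munmap()s the second page and
 * returns the original address.
 */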
int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	uintptr_t addr;
	size_t len;
	int error = 0;

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for page alignment.
	 * Linux defines PAGE_MASK as the complement of FreeBSD's
	 * (~PAGE_MASK).
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		addr = args->addr + args->new_len;
		len = args->old_len - args->new_len;
		error = kern_munmap(td, addr, len);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{

	return (kern_msync(td, args->addr, args->len,
	    args->fl & ~LINUX_MS_SYNC));
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}
#endif

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use the hard-coded CLK_TCK value.
 * Since 2.2.1, glibc uses the value exported by the kernel via the
 * AT_CLKTCK auxiliary vector entry.
 */
#define CLK_TCK 100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))
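
/*
 * Worked example for the tick conversion (numbers are illustrative): with
 * stclohz == 128, a CPU time of 2.5 s converts via CONVNTCK to
 * 2 * 128 + 500000 / (1000000 / 128) == 256 + 64 == 320 ticks; when
 * emulating a pre-2.4 Linux kernel, CONVOTCK with CLK_TCK == 100 is used
 * instead, giving 250 ticks.
 */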
int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
#if defined(__amd64__)
	/*
	 * On amd64, Linux uname(2) needs to return "x86_64"
	 * for both 64-bit and 32-bit applications.  On 32-bit,
	 * the string returned by getauxval(AT_PLATFORM) needs
	 * to remain "i686", though.
	 */
#if defined(COMPAT_LINUX32)
	if (linux32_emulate_i386)
		strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
	else
#endif
	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
#elif defined(__aarch64__)
	strlcpy(utsname.machine, "aarch64", LINUX_MAX_UTSNAME);
#elif defined(__i386__)
	strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
#endif

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

struct l_utimbuf {
	l_time_t	l_actime;
	l_time_t	l_modtime;
};

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut)) != 0)
			return (error);
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	if (!LUSECONVPATH(td)) {
		error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		LCONVPATHEXIST(args->fname, &fname);
		error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
		    UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv)) != 0)
			return (error);
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	if (!LUSECONVPATH(td)) {
		error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		LCONVPATHEXIST(args->fname, &fname);
		error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
		    tvp, UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif
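
/*
 * The converters below map the special Linux tv_nsec sentinels onto their
 * native counterparts: LINUX_UTIME_OMIT -> UTIME_OMIT (leave the timestamp
 * untouched) and LINUX_UTIME_NOW -> UTIME_NOW (set it to the current
 * time); any other tv_nsec must be a valid nanosecond count in
 * [0, 999999999].
 */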
static int
linux_utimensat_lts_to_ts(struct l_timespec *l_times, struct timespec *times)
{

	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
	    l_times->tv_nsec != LINUX_UTIME_NOW &&
	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
		return (EINVAL);

	times->tv_sec = l_times->tv_sec;
	switch (l_times->tv_nsec)
	{
	case LINUX_UTIME_OMIT:
		times->tv_nsec = UTIME_OMIT;
		break;
	case LINUX_UTIME_NOW:
		times->tv_nsec = UTIME_NOW;
		break;
	default:
		times->tv_nsec = l_times->tv_nsec;
	}

	return (0);
}

static int
linux_common_utimensat(struct thread *td, int ldfd, const char *pathname,
    struct timespec *timesp, int lflags)
{
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (ldfd == LINUX_AT_FDCWD) ? AT_FDCWD : ldfd;

	if (lflags & ~(LINUX_AT_SYMLINK_NOFOLLOW | LINUX_AT_EMPTY_PATH))
		return (EINVAL);

	if (timesp != NULL) {
		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
		 */
		if (timesp[0].tv_nsec == UTIME_OMIT &&
		    timesp[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (lflags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;
	if (lflags & LINUX_AT_EMPTY_PATH)
		flags |= AT_EMPTY_PATH;

	if (!LUSECONVPATH(td)) {
		if (pathname != NULL) {
			return (kern_utimensat(td, dfd, pathname,
			    UIO_USERSPACE, timesp, UIO_SYSSPACE, flags));
		}
	}

	if (pathname != NULL)
		LCONVPATHEXIST_AT(pathname, &path, dfd);
	else if (lflags != 0)
		return (EINVAL);

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
		    UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}

int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp;
	int error;

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		error = linux_utimensat_lts_to_ts(&l_times[0], &times[0]);
		if (error != 0)
			return (error);
		error = linux_utimensat_lts_to_ts(&l_times[1], &times[1]);
		if (error != 0)
			return (error);
		timesp = times;
	} else
		timesp = NULL;

	return (linux_common_utimensat(td, args->dfd, args->pathname,
	    timesp, args->flags));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
static int
linux_utimensat_lts64_to_ts(struct l_timespec64 *l_times, struct timespec *times)
{

	/* Zero out the padding in compat mode. */
	l_times->tv_nsec &= 0xFFFFFFFFUL;

	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
	    l_times->tv_nsec != LINUX_UTIME_NOW &&
	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
		return (EINVAL);

	times->tv_sec = l_times->tv_sec;
	switch (l_times->tv_nsec)
	{
	case LINUX_UTIME_OMIT:
		times->tv_nsec = UTIME_OMIT;
		break;
	case LINUX_UTIME_NOW:
		times->tv_nsec = UTIME_NOW;
		break;
	default:
		times->tv_nsec = l_times->tv_nsec;
	}

	return (0);
}

int
linux_utimensat_time64(struct thread *td, struct linux_utimensat_time64_args *args)
{
	struct l_timespec64 l_times[2];
	struct timespec times[2], *timesp;
	int error;

	if (args->times64 != NULL) {
		error = copyin(args->times64, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		error = linux_utimensat_lts64_to_ts(&l_times[0], &times[0]);
		if (error != 0)
			return (error);
		error = linux_utimensat_lts64_to_ts(&l_times[1], &times[1]);
		if (error != 0)
			return (error);
		timesp = times;
	} else
		timesp = NULL;

	return (linux_common_utimensat(td, args->dfd, args->pathname,
	    timesp, args->flags));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv)) != 0)
			return (error);
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	if (!LUSECONVPATH(td)) {
		error = kern_utimesat(td, dfd, args->filename, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		LCONVPATHEXIST_AT(args->filename, &fname, dfd);
		error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE,
		    tvp, UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif
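
/*
 * Reminder on the wait status layout manipulated below (the encoding is
 * the same on Linux and FreeBSD; only the signal numbers differ): a
 * terminated child stores the signal in the low 7 bits, a stopped child
 * stores 0x7f in the low byte and the signal in bits 8-15, and
 * "continued" is the special value 0xffff.  E.g. a child stopped by
 * SIGSTOP (19 on Linux) is reported as (19 << 8) | 0x7f == 0x137f.
 */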
static int
linux_common_wait(struct thread *td, idtype_t idtype, int id, int *statusp,
    int options, void *rup, l_siginfo_t *infop)
{
	l_siginfo_t lsi;
	siginfo_t siginfo;
	struct __wrusage wru;
	int error, status, tmpstat, sig;

	error = kern_wait6(td, idtype, id, &status, options,
	    rup != NULL ? &wru : NULL, &siginfo);

	if (error == 0 && statusp) {
		tmpstat = status & 0xffff;
		if (WIFSIGNALED(tmpstat)) {
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		} else if (WIFSTOPPED(tmpstat)) {
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
#if defined(__aarch64__) || (defined(__amd64__) && !defined(COMPAT_LINUX32))
			if (WSTOPSIG(status) == SIGTRAP) {
				tmpstat = linux_ptrace_status(td,
				    siginfo.si_pid, tmpstat);
			}
#endif
		} else if (WIFCONTINUED(tmpstat)) {
			tmpstat = 0xffff;
		}
		error = copyout(&tmpstat, statusp, sizeof(int));
	}
	if (error == 0 && rup != NULL)
		error = linux_copyout_rusage(&wru.wru_self, rup);
	if (error == 0 && infop != NULL && td->td_retval[0] != 0) {
		sig = bsd_to_linux_signal(siginfo.si_signo);
		siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		error = copyout(&lsi, infop, sizeof(lsi));
	}

	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	struct linux_wait4_args wait4_args;

	wait4_args.pid = args->pid;
	wait4_args.status = args->status;
	wait4_args.options = args->options;
	wait4_args.rusage = NULL;

	return (linux_wait4(td, &wait4_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	struct proc *p;
	int options, id, idtype;

	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	/* -INT_MIN is not defined. */
	if (args->pid == INT_MIN)
		return (ESRCH);

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;

	/*
	 * FreeBSD has no analogue of the __WALL option bit, so explicitly
	 * set all possible option bits to emulate the Linux __WALL wait
	 * option bit.  The same applies to the waitid system call.
	 */
	if ((args->options & __WALL) != 0)
		options |= WUNTRACED | WCONTINUED | WLINUXCLONE;

	if (args->pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (args->pid < 0) {
		idtype = P_PGID;
		id = (id_t)-args->pid;
	} else if (args->pid == 0) {
		idtype = P_PGID;
		p = td->td_proc;
		PROC_LOCK(p);
		id = p->p_pgid;
		PROC_UNLOCK(p);
	} else {
		idtype = P_PID;
		id = (id_t)args->pid;
	}

	return (linux_common_wait(td, idtype, id, args->status, options,
	    args->rusage, NULL));
}

int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	idtype_t idtype;
	int error, options;
	struct proc *p;
	pid_t id;

	if (args->options & ~(LINUX_WNOHANG | LINUX_WNOWAIT | LINUX_WEXITED |
	    LINUX_WSTOPPED | LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);
	if ((args->options & __WALL) != 0)
		options |= WEXITED | WTRAPPED | WUNTRACED |
		    WCONTINUED | WLINUXCLONE;

	id = args->id;
	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (linux_use54(td) && args->id == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			id = p->p_pgid;
			PROC_UNLOCK(p);
		} else if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	case LINUX_P_PIDFD:
		LINUX_RATELIMIT_MSG("unsupported waitid P_PIDFD idtype");
		return (ENOSYS);
	default:
		return (EINVAL);
	}

	error = linux_common_wait(td, idtype, id, NULL, options,
	    args->rusage, args->info);
	td->td_retval[0] = 0;

	return (error);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;
	enum uio_seg seg;
	bool convpath;

	convpath = LUSECONVPATH(td);
	if (!convpath) {
		path = args->path;
		seg = UIO_USERSPACE;
	} else {
		LCONVPATHCREAT(args->path, &path);
		seg = UIO_SYSSPACE;
	}

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, path, seg,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, seg,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, path, seg,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (convpath)
		LFREEPATH(path);
	return (error);
}
#endif

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;
	enum uio_seg seg;
	bool convpath;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	convpath = LUSECONVPATH(td);
	if (!convpath) {
		path = __DECONST(char *, args->filename);
		seg = UIO_USERSPACE;
	} else {
		LCONVPATHCREAT_AT(args->filename, &path, dfd);
		seg = UIO_SYSSPACE;
	}

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, seg, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, seg, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, seg,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (convpath)
		LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
	struct linux_pemuldata *pem;
	struct proc *p = td->td_proc;
	uint32_t old;

	PROC_LOCK(p);
	pem = pem_find(p);
	old = pem->persona;
	if (args->per != 0xffffffff)
		pem->persona = args->per;
	PROC_UNLOCK(p);

	td->td_retval[0] = old;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define	B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{

	return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	crextend(newcred, ngrp + 1);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	/*
	 * cr_groups[0] holds egid.  Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	proc_set_cred(p, newcred);
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_LINUX);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid.  Returning the whole set
	 * here will cause a duplicate.  Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_LINUX, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_LINUX);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

static bool
linux_get_dummy_limit(l_uint resource, struct rlimit *rlim)
{

	if (linux_dummy_rlimits == 0)
		return (false);

	switch (resource) {
	case LINUX_RLIMIT_LOCKS:
	case LINUX_RLIMIT_SIGPENDING:
	case LINUX_RLIMIT_MSGQUEUE:
	case LINUX_RLIMIT_RTTIME:
		rlim->rlim_cur = LINUX_RLIM_INFINITY;
		rlim->rlim_max = LINUX_RLIM_INFINITY;
		return (true);
	case LINUX_RLIMIT_NICE:
	case LINUX_RLIMIT_RTPRIO:
		rlim->rlim_cur = 0;
		rlim->rlim_max = 0;
		return (true);
	default:
		return (false);
	}
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
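
/*
 * Illustration of the priority mapping used by the scheduler calls below,
 * assuming the usual constants (LINUX_MAX_RT_PRIO == 100, RTP_PRIO_MIN == 0,
 * RTP_PRIO_MAX == 31): Linux RT priority 1 maps to rtprio 0, Linux 50 maps
 * to (49 * 32) / 99 == 15, and Linux 99 maps to (98 * 32) / 99 == 31.
 */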

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	if (linux_map_sched_prio) {
		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0)
				return (EINVAL);

			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO)
				return (EINVAL);

			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = LINUX_MAX_RT_PRIO - 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}
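
/*
 * The magic constants below mirror Linux: reboot(2) documents
 * REBOOT_MAGIC1 as 0xfee1dead and the accepted magic2 values as encodings
 * of Linus Torvalds' and his daughters' birth dates (e.g. 0x28121969 is
 * 28 Dec 1969).
 */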

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

	td->td_retval[0] = kern_getppid(td);
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{

	return (kern_getsid(td, args->pid));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	int error;

	error = kern_getpriority(td, args->which, args->who);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set.  We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
	/* NOTREACHED */
}
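
/*
 * Capability header version negotiation, as implemented by linux_capget()
 * and linux_capset() below: version 1 uses one 32-bit slot per capability
 * set, versions 2 and 3 use two.  When userland passes an unknown version,
 * the Linux convention is to write the kernel's preferred version back
 * into the header and fail with EINVAL, letting the caller retry.
 */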

#define	_LINUX_CAPABILITY_VERSION_1	0x19980330
#define	_LINUX_CAPABILITY_VERSION_2	0x20071026
#define	_LINUX_CAPABILITY_VERSION_3	0x20080522

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size, arg;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	int pdeath_signal, trace_state;

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		pdeath_signal = linux_to_bsd_signal(args->arg2);
		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
		    &pdeath_signal));
	case LINUX_PR_GET_PDEATHSIG:
		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
		    &pdeath_signal);
		if (error != 0)
			return (error);
		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
		return (copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal)));
	/*
	 * In Linux, this flag controls if set[gu]id processes can coredump.
	 * There are additional semantics imposed on processes that cannot
	 * coredump:
	 * - Such processes cannot be ptraced.
	 * - There are some semantics around ownership of process-related files
	 *   in the /proc namespace.
	 *
	 * In FreeBSD, we can (and by default, do) disable setuid coredump
	 * system-wide with 'sugid_coredump.'  We control traceability on a
	 * per-process basis with the procctl PROC_TRACE (=> P2_NOTRACE flag).
	 * By happy coincidence, P2_NOTRACE also prevents coredumping.  So the
	 * procctl is roughly analogous to Linux's DUMPABLE.
	 *
	 * So, proxy these knobs to the corresponding PROC_TRACE setting.
	 */
	case LINUX_PR_GET_DUMPABLE:
		error = kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_STATUS,
		    &trace_state);
		if (error != 0)
			return (error);
		td->td_retval[0] = (trace_state != -1);
		return (0);
	case LINUX_PR_SET_DUMPABLE:
		/*
		 * It is only valid for userspace to set one of these two
		 * flags, and only one at a time.
		 */
		switch (args->arg2) {
		case LINUX_SUID_DUMP_DISABLE:
			trace_state = PROC_TRACE_CTL_DISABLE_EXEC;
			break;
		case LINUX_SUID_DUMP_USER:
			trace_state = PROC_TRACE_CTL_ENABLE;
			break;
		default:
			return (EINVAL);
		}
		return (kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_CTL,
		    &trace_state));
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure not to
		 * overflow the size a Linux program expects.  We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side.  This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	case LINUX_PR_GET_SECCOMP:
	case LINUX_PR_SET_SECCOMP:
		/*
		 * Same as returned by Linux without CONFIG_SECCOMP enabled.
		 */
		error = EINVAL;
		break;
	case LINUX_PR_CAPBSET_READ:
#if 0
		/*
		 * This makes too much noise with Ubuntu Focal.
		 */
		linux_msg(td, "unsupported prctl PR_CAPBSET_READ %d",
		    (int)args->arg2);
#endif
		error = EINVAL;
		break;
	case LINUX_PR_SET_NO_NEW_PRIVS:
		arg = args->arg2 == 1 ?
		    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
		error = kern_procctl(td, P_PID, p->p_pid,
		    PROC_NO_NEW_PRIVS_CTL, &arg);
		break;
	case LINUX_PR_SET_PTRACER:
		linux_msg(td, "unsupported prctl PR_SET_PTRACER");
		error = EINVAL;
		break;
	default:
		linux_msg(td, "unsupported prctl option %d", args->option);
		error = EINVAL;
		break;
	}

	return (error);
}

int
linux_sched_setparam(struct thread *td,
    struct linux_sched_setparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	error = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	if (linux_map_sched_prio) {
		error = kern_sched_getscheduler(td, tdt, &policy);
		if (error)
			goto out;

		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0) {
				error = EINVAL;
				goto out;
			}
			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO) {
				error = EINVAL;
				goto out;
			}
			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	error = kern_sched_setparam(td, tdt, &sched_param);
out:	PROC_UNLOCK(tdt->td_proc);
	return (error);
}
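
/*
 * linux_sched_getparam() applies the inverse mapping, rounding up so that
 * set/get round-trips.  With the same assumed constants as above: rtprio 0
 * maps back to Linux priority 1, and rtprio 16 maps to
 * (16 * 99 + 30) / 31 + 1 == 53, which maps forward to rtprio 16 again.
 */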
/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct thread *tdt;

	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	PROC_UNLOCK(tdt->td_proc);

	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr);
	if (error == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 * Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct thread *tdt;

	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	PROC_UNLOCK(tdt->td_proc);

	return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr));
}

struct linux_rlimit64 {
	uint64_t	rlim_cur;
	uint64_t	rlim_max;
};

int
linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
{
	struct rlimit rlim, nrlim;
	struct linux_rlimit64 lrlim;
	struct proc *p;
	u_int which;
	int flags;
	int error = 0;

	if (args->new == NULL && args->old != NULL) {
		if (linux_get_dummy_limit(args->resource, &rlim)) {
			lrlim.rlim_cur = rlim.rlim_cur;
			lrlim.rlim_max = rlim.rlim_max;
			return (copyout(&lrlim, args->old, sizeof(lrlim)));
		}
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	if (args->new != NULL) {
		/*
		 * Note: unlike on FreeBSD, where rlim is signed 64-bit, the
		 * Linux rlim is unsigned 64-bit.  FreeBSD treats negative
		 * limits as INFINITY, so no conversion is needed here.
		 */
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	if (args->pid == 0) {
		p = td->td_proc;
		PHOLD(p);
	} else {
		error = pget(args->pid, flags, &p);
		if (error != 0)
			return (error);
	}
	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit_proc(p, which, &rlim);
		PROC_UNLOCK(p);
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

out:
	PRELE(p);
	return (error);
}
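
/*
 * Example of the infinity translation above, assuming the usual values
 * RLIM_INFINITY = INT64_MAX on FreeBSD and LINUX_RLIM_INFINITY = ~0ULL:
 * an unlimited limit reads back to the Linux process as
 * rlim_cur = rlim_max = 0xffffffffffffffff, which is what glibc's
 * getrlimit() wrapper built on prlimit64 expects.  Finite values pass
 * through unchanged in both directions.
 */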
int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct timespec ts, *tsp;
	int error;

	if (args->tsp != NULL) {
		error = linux_get_timespec(&ts, args->tsp);
		if (error != 0)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	error = linux_common_pselect6(td, args->nfds, args->readfds,
	    args->writefds, args->exceptfds, tsp, args->sig);

	if (args->tsp != NULL)
		linux_put_timespec(&ts, args->tsp);
	return (error);
}

static int
linux_common_pselect6(struct thread *td, l_int nfds, l_fd_set *readfds,
    l_fd_set *writefds, l_fd_set *exceptfds, struct timespec *tsp,
    l_uintptr_t *sig)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
	if (sig != NULL) {
		error = copyin(sig, &lpse6, sizeof(lpse6));
		if (error != 0)
			return (error);
		if (lpse6.ss_len != sizeof(l_ss))
			return (EINVAL);
		if (lpse6.ss != 0) {
			error = copyin(PTRIN(lpse6.ss), &l_ss,
			    sizeof(l_ss));
			if (error != 0)
				return (error);
			linux_to_bsd_sigset(&l_ss, &ss);
			ssp = &ss;
		}
	}

	/*
	 * glibc currently converts the nanosecond field to microseconds,
	 * which loses precision, but in practice the difference is rarely
	 * noticeable.
	 */
	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&utv, tsp);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, nfds, readfds, writefds,
	    exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (tsp != NULL) {
		/*
		 * Compute how much time was left of the timeout,
		 * by subtracting the current time and the time
		 * before we started the call, and subtracting
		 * that result from the user-supplied value.
		 */
		microtime(&tv1);
		timevalsub(&tv1, &tv0);
		timevalsub(&utv, &tv1);
		if (utv.tv_sec < 0)
			timevalclear(&utv);
		TIMEVAL_TO_TIMESPEC(&utv, tsp);
	}
	return (error);
}
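
/*
 * For reference: Linux's pselect6 does not pass the sigmask directly.
 * The sixth argument points to a two-word record, mirrored here by
 * struct l_pselect6arg; on the Linux side it looks roughly like this
 * (illustrative only):
 *
 *	struct {
 *		const sigset_t *ss;	// may be NULL
 *		size_t ss_len;		// must equal sizeof(sigset_t)
 *	};
 *
 * That is why the code above performs two copyins: one for the record
 * itself and, when ss is non-NULL, a second for the signal set.
 */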
2455 */ 2456 microtime(&tv1); 2457 timevalsub(&tv1, &tv0); 2458 timevalsub(&utv, &tv1); 2459 if (utv.tv_sec < 0) 2460 timevalclear(&utv); 2461 TIMEVAL_TO_TIMESPEC(&utv, tsp); 2462 } 2463 return (error); 2464 } 2465 2466 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 2467 int 2468 linux_pselect6_time64(struct thread *td, 2469 struct linux_pselect6_time64_args *args) 2470 { 2471 struct timespec ts, *tsp; 2472 int error; 2473 2474 if (args->tsp != NULL) { 2475 error = linux_get_timespec64(&ts, args->tsp); 2476 if (error != 0) 2477 return (error); 2478 tsp = &ts; 2479 } else 2480 tsp = NULL; 2481 2482 error = linux_common_pselect6(td, args->nfds, args->readfds, 2483 args->writefds, args->exceptfds, tsp, args->sig); 2484 2485 if (args->tsp != NULL) 2486 linux_put_timespec64(&ts, args->tsp); 2487 return (error); 2488 } 2489 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 2490 2491 int 2492 linux_ppoll(struct thread *td, struct linux_ppoll_args *args) 2493 { 2494 struct timespec uts, *tsp; 2495 int error; 2496 2497 if (args->tsp != NULL) { 2498 error = linux_get_timespec(&uts, args->tsp); 2499 if (error != 0) 2500 return (error); 2501 tsp = &uts; 2502 } else 2503 tsp = NULL; 2504 2505 error = linux_common_ppoll(td, args->fds, args->nfds, tsp, 2506 args->sset, args->ssize); 2507 if (error == 0 && args->tsp != NULL) 2508 error = linux_put_timespec(&uts, args->tsp); 2509 return (error); 2510 } 2511 2512 static int 2513 linux_common_ppoll(struct thread *td, struct pollfd *fds, uint32_t nfds, 2514 struct timespec *tsp, l_sigset_t *sset, l_size_t ssize) 2515 { 2516 struct timespec ts0, ts1; 2517 struct pollfd stackfds[32]; 2518 struct pollfd *kfds; 2519 l_sigset_t l_ss; 2520 sigset_t *ssp; 2521 sigset_t ss; 2522 int error; 2523 2524 if (kern_poll_maxfds(nfds)) 2525 return (EINVAL); 2526 if (sset != NULL) { 2527 if (ssize != sizeof(l_ss)) 2528 return (EINVAL); 2529 error = copyin(sset, &l_ss, sizeof(l_ss)); 2530 if (error) 2531 return (error); 2532 linux_to_bsd_sigset(&l_ss, &ss); 2533 ssp = &ss; 2534 } else 2535 ssp = NULL; 2536 if (tsp != NULL) 2537 nanotime(&ts0); 2538 2539 if (nfds > nitems(stackfds)) 2540 kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK); 2541 else 2542 kfds = stackfds; 2543 error = linux_pollin(td, kfds, fds, nfds); 2544 if (error != 0) 2545 goto out; 2546 2547 error = kern_poll_kfds(td, kfds, nfds, tsp, ssp); 2548 if (error == 0) 2549 error = linux_pollout(td, kfds, fds, nfds); 2550 2551 if (error == 0 && tsp != NULL) { 2552 if (td->td_retval[0]) { 2553 nanotime(&ts1); 2554 timespecsub(&ts1, &ts0, &ts1); 2555 timespecsub(tsp, &ts1, tsp); 2556 if (tsp->tv_sec < 0) 2557 timespecclear(tsp); 2558 } else 2559 timespecclear(tsp); 2560 } 2561 2562 out: 2563 if (nfds > nitems(stackfds)) 2564 free(kfds, M_TEMP); 2565 return (error); 2566 } 2567 2568 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 2569 int 2570 linux_ppoll_time64(struct thread *td, struct linux_ppoll_time64_args *args) 2571 { 2572 struct timespec uts, *tsp; 2573 int error; 2574 2575 if (args->tsp != NULL) { 2576 error = linux_get_timespec64(&uts, args->tsp); 2577 if (error != 0) 2578 return (error); 2579 tsp = &uts; 2580 } else 2581 tsp = NULL; 2582 error = linux_common_ppoll(td, args->fds, args->nfds, tsp, 2583 args->sset, args->ssize); 2584 if (error == 0 && args->tsp != NULL) 2585 error = linux_put_timespec64(&uts, args->tsp); 2586 return (error); 2587 } 2588 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 2589 2590 static int 2591 
static int
linux_pollin(struct thread *td, struct pollfd *fds, struct pollfd *ufds,
    u_int nfd)
{
	int error;
	u_int i;

	error = copyin(ufds, fds, nfd * sizeof(*fds));
	if (error != 0)
		return (error);

	for (i = 0; i < nfd; i++) {
		if (fds->events != 0)
			linux_to_bsd_poll_events(td, fds->fd,
			    fds->events, &fds->events);
		fds++;
	}
	return (0);
}

static int
linux_pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds,
    u_int nfd)
{
	int error = 0;
	u_int i, n = 0;

	for (i = 0; i < nfd; i++) {
		if (fds->revents != 0) {
			bsd_to_linux_poll_events(fds->revents,
			    &fds->revents);
			n++;
		}
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
linux_sched_rr_get_interval_common(struct thread *td, pid_t pid,
    struct timespec *ts)
{
	struct thread *tdt;
	int error;

	/*
	 * According to the man page, EINVAL should be returned when an
	 * invalid pid is specified.
	 */
	if (pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, ts);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	int error;

	error = linux_sched_rr_get_interval_common(td, uap->pid, &ts);
	if (error != 0)
		return (error);
	return (linux_put_timespec(&ts, uap->interval));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_sched_rr_get_interval_time64(struct thread *td,
    struct linux_sched_rr_get_interval_time64_args *uap)
{
	struct timespec ts;
	int error;

	error = linux_sched_rr_get_interval_common(td, uap->pid, &ts);
	if (error != 0)
		return (error);
	return (linux_put_timespec64(&ts, uap->interval));
}
#endif

/*
 * When the Linux thread is the initial thread in the thread group,
 * its thread id is equal to the process id.  Glibc depends on this
 * magic (there is an assert in pthread_getattr_np.c).
 */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	} else if (tid > PID_MAX)
		return (tdfind(tid, pid));

	/*
	 * The initial thread's tid is equal to the pid.
	 */
	p = pfind(tid);
	if (p != NULL) {
		if (SV_PROC_ABI(p) != SV_ABI_LINUX ||
		    (pid != -1 && tid != pid)) {
			/*
			 * p is not a Linuxulator process.
			 */
			PROC_UNLOCK(p);
			return (NULL);
		}
		FOREACH_THREAD_IN_PROC(p, tdt) {
			em = em_find(tdt);
			if (tid == em->em_tid)
				return (tdt);
		}
		PROC_UNLOCK(p);
	}
	return (NULL);
}
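
/*
 * Worked example of the lookup rules above (the numbers are
 * illustrative): a Linux process with pid 1234 has an initial thread
 * whose tid is also 1234, while additional threads get tids above
 * PID_MAX.  So linux_tdfind(td, 1234, -1) resolves via pfind(), whereas
 * looking up a secondary thread, say tid 100427, takes the tdfind()
 * branch.  Either way the caller receives the thread with its process
 * locked and is responsible for the matching PROC_UNLOCK().
 */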
2713 */ 2714 PROC_UNLOCK(p); 2715 return (NULL); 2716 } 2717 FOREACH_THREAD_IN_PROC(p, tdt) { 2718 em = em_find(tdt); 2719 if (tid == em->em_tid) 2720 return (tdt); 2721 } 2722 PROC_UNLOCK(p); 2723 } 2724 return (NULL); 2725 } 2726 2727 void 2728 linux_to_bsd_waitopts(int options, int *bsdopts) 2729 { 2730 2731 if (options & LINUX_WNOHANG) 2732 *bsdopts |= WNOHANG; 2733 if (options & LINUX_WUNTRACED) 2734 *bsdopts |= WUNTRACED; 2735 if (options & LINUX_WEXITED) 2736 *bsdopts |= WEXITED; 2737 if (options & LINUX_WCONTINUED) 2738 *bsdopts |= WCONTINUED; 2739 if (options & LINUX_WNOWAIT) 2740 *bsdopts |= WNOWAIT; 2741 2742 if (options & __WCLONE) 2743 *bsdopts |= WLINUXCLONE; 2744 } 2745 2746 int 2747 linux_getrandom(struct thread *td, struct linux_getrandom_args *args) 2748 { 2749 struct uio uio; 2750 struct iovec iov; 2751 int error; 2752 2753 if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM)) 2754 return (EINVAL); 2755 if (args->count > INT_MAX) 2756 args->count = INT_MAX; 2757 2758 iov.iov_base = args->buf; 2759 iov.iov_len = args->count; 2760 2761 uio.uio_iov = &iov; 2762 uio.uio_iovcnt = 1; 2763 uio.uio_resid = iov.iov_len; 2764 uio.uio_segflg = UIO_USERSPACE; 2765 uio.uio_rw = UIO_READ; 2766 uio.uio_td = td; 2767 2768 error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK); 2769 if (error == 0) 2770 td->td_retval[0] = args->count - uio.uio_resid; 2771 return (error); 2772 } 2773 2774 int 2775 linux_mincore(struct thread *td, struct linux_mincore_args *args) 2776 { 2777 2778 /* Needs to be page-aligned */ 2779 if (args->start & PAGE_MASK) 2780 return (EINVAL); 2781 return (kern_mincore(td, args->start, args->len, args->vec)); 2782 } 2783 2784 #define SYSLOG_TAG "<6>" 2785 2786 int 2787 linux_syslog(struct thread *td, struct linux_syslog_args *args) 2788 { 2789 char buf[128], *src, *dst; 2790 u_int seq; 2791 int buflen, error; 2792 2793 if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) { 2794 linux_msg(td, "syslog unsupported type 0x%x", args->type); 2795 return (EINVAL); 2796 } 2797 2798 if (args->len < 6) { 2799 td->td_retval[0] = 0; 2800 return (0); 2801 } 2802 2803 error = priv_check(td, PRIV_MSGBUF); 2804 if (error) 2805 return (error); 2806 2807 mtx_lock(&msgbuf_lock); 2808 msgbuf_peekbytes(msgbufp, NULL, 0, &seq); 2809 mtx_unlock(&msgbuf_lock); 2810 2811 dst = args->buf; 2812 error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG)); 2813 /* The -1 is to skip the trailing '\0'. 
int
linux_getcpu(struct thread *td, struct linux_getcpu_args *args)
{
	int cpu, error, node;

	cpu = td->td_oncpu; /* Make sure it doesn't change during copyout(9) */
	error = 0;
	node = cpuid_to_pcpu[cpu]->pc_domain;

	if (args->cpu != NULL)
		error = copyout(&cpu, args->cpu, sizeof(l_int));
	if (error == 0 && args->node != NULL)
		error = copyout(&node, args->node, sizeof(l_int));
	return (error);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_poll(struct thread *td, struct linux_poll_args *args)
{
	struct timespec ts, *tsp;

	if (args->timeout != INFTIM) {
		if (args->timeout < 0)
			return (EINVAL);
		ts.tv_sec = args->timeout / 1000;
		ts.tv_nsec = (args->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (linux_common_ppoll(td, args->fds, args->nfds,
	    tsp, NULL, 0));
}
#endif /* __i386__ || __amd64__ */

int
linux_seccomp(struct thread *td, struct linux_seccomp_args *args)
{

	switch (args->op) {
	case LINUX_SECCOMP_GET_ACTION_AVAIL:
		return (EOPNOTSUPP);
	default:
		/*
		 * Ignore unknown operations, just like a Linux kernel
		 * built without CONFIG_SECCOMP.
		 */
		return (EINVAL);
	}
}