1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2002 Doug Rabson 5 * Copyright (c) 1994-1995 Søren Schmidt 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer 13 * in this position and unchanged. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_compat.h" 36 37 #include <sys/param.h> 38 #include <sys/blist.h> 39 #include <sys/fcntl.h> 40 #if defined(__i386__) 41 #include <sys/imgact_aout.h> 42 #endif 43 #include <sys/jail.h> 44 #include <sys/kernel.h> 45 #include <sys/limits.h> 46 #include <sys/lock.h> 47 #include <sys/malloc.h> 48 #include <sys/mman.h> 49 #include <sys/mount.h> 50 #include <sys/msgbuf.h> 51 #include <sys/mutex.h> 52 #include <sys/namei.h> 53 #include <sys/poll.h> 54 #include <sys/priv.h> 55 #include <sys/proc.h> 56 #include <sys/procctl.h> 57 #include <sys/reboot.h> 58 #include <sys/racct.h> 59 #include <sys/random.h> 60 #include <sys/resourcevar.h> 61 #include <sys/sched.h> 62 #include <sys/sdt.h> 63 #include <sys/signalvar.h> 64 #include <sys/stat.h> 65 #include <sys/syscallsubr.h> 66 #include <sys/sysctl.h> 67 #include <sys/sysproto.h> 68 #include <sys/systm.h> 69 #include <sys/time.h> 70 #include <sys/vmmeter.h> 71 #include <sys/vnode.h> 72 #include <sys/wait.h> 73 #include <sys/cpuset.h> 74 #include <sys/uio.h> 75 76 #include <security/mac/mac_framework.h> 77 78 #include <vm/vm.h> 79 #include <vm/pmap.h> 80 #include <vm/vm_kern.h> 81 #include <vm/vm_map.h> 82 #include <vm/vm_extern.h> 83 #include <vm/swap_pager.h> 84 85 #ifdef COMPAT_LINUX32 86 #include <machine/../linux32/linux.h> 87 #include <machine/../linux32/linux32_proto.h> 88 #else 89 #include <machine/../linux/linux.h> 90 #include <machine/../linux/linux_proto.h> 91 #endif 92 93 #include <compat/linux/linux_common.h> 94 #include <compat/linux/linux_dtrace.h> 95 #include <compat/linux/linux_file.h> 96 #include <compat/linux/linux_mib.h> 97 #include <compat/linux/linux_signal.h> 98 #include <compat/linux/linux_timer.h> 99 #include <compat/linux/linux_util.h> 100 #include <compat/linux/linux_sysproto.h> 101 #include <compat/linux/linux_emul.h> 102 #include <compat/linux/linux_misc.h> 103 104 int stclohz; /* Statistics clock frequency */ 105 106 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = { 107 RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK, 108 RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE, 109 RLIMIT_MEMLOCK, RLIMIT_AS 110 }; 111 112 struct l_sysinfo { 113 l_long uptime; /* Seconds since boot */ 114 l_ulong loads[3]; /* 1, 5, and 15 minute load averages */ 115 #define LINUX_SYSINFO_LOADS_SCALE 65536 116 l_ulong totalram; /* Total usable main memory size */ 117 l_ulong freeram; /* Available memory size */ 118 l_ulong sharedram; /* Amount of shared memory */ 119 l_ulong bufferram; /* Memory used by buffers */ 120 l_ulong totalswap; /* Total swap space size */ 121 l_ulong freeswap; /* swap space still available */ 122 l_ushort procs; /* Number of current processes */ 123 l_ushort pads; 124 l_ulong totalhigh; 125 l_ulong freehigh; 126 l_uint mem_unit; 127 char _f[20-2*sizeof(l_long)-sizeof(l_int)]; /* padding */ 128 }; 129 130 struct l_pselect6arg { 131 l_uintptr_t ss; 132 l_size_t ss_len; 133 }; 134 135 static int linux_utimensat_lts_to_ts(struct l_timespec *, 136 struct timespec *); 137 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 138 static int linux_utimensat_lts64_to_ts(struct l_timespec64 *, 139 struct timespec *); 140 #endif 141 static int linux_common_utimensat(struct thread *, int, 142 const char *, struct timespec *, int); 143 static int linux_common_pselect6(struct thread *, l_int, 144 l_fd_set *, l_fd_set *, l_fd_set *, 145 struct timespec *, l_uintptr_t *); 146 static int linux_common_ppoll(struct thread *, struct pollfd *, 147 uint32_t, struct timespec *, l_sigset_t *, 148 l_size_t); 149 static int linux_pollin(struct thread *, struct pollfd *, 150 struct pollfd *, u_int); 151 static int linux_pollout(struct thread *, struct pollfd *, 152 struct pollfd *, u_int); 153 154 int 155 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args) 156 { 157 struct l_sysinfo sysinfo; 158 int i, j; 159 struct timespec ts; 160 161 bzero(&sysinfo, sizeof(sysinfo)); 162 getnanouptime(&ts); 163 if (ts.tv_nsec != 0) 164 ts.tv_sec++; 165 sysinfo.uptime = ts.tv_sec; 166 167 /* Use the information from the mib to get our load averages */ 168 for (i = 0; i < 3; i++) 169 sysinfo.loads[i] = averunnable.ldavg[i] * 170 LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale; 171 172 sysinfo.totalram = physmem * PAGE_SIZE; 173 sysinfo.freeram = (u_long)vm_free_count() * PAGE_SIZE; 174 175 /* 176 * sharedram counts pages allocated to named, swap-backed objects such 177 * as shared memory segments and tmpfs files. There is no cheap way to 178 * compute this, so just leave the field unpopulated. Linux itself only 179 * started setting this field in the 3.x timeframe. 180 */ 181 sysinfo.sharedram = 0; 182 sysinfo.bufferram = 0; 183 184 swap_pager_status(&i, &j); 185 sysinfo.totalswap = i * PAGE_SIZE; 186 sysinfo.freeswap = (i - j) * PAGE_SIZE; 187 188 sysinfo.procs = nprocs; 189 190 /* 191 * Platforms supported by the emulation layer do not have a notion of 192 * high memory. 193 */ 194 sysinfo.totalhigh = 0; 195 sysinfo.freehigh = 0; 196 197 sysinfo.mem_unit = 1; 198 199 return (copyout(&sysinfo, args->info, sizeof(sysinfo))); 200 } 201 202 #ifdef LINUX_LEGACY_SYSCALLS 203 int 204 linux_alarm(struct thread *td, struct linux_alarm_args *args) 205 { 206 struct itimerval it, old_it; 207 u_int secs; 208 int error __diagused; 209 210 secs = args->secs; 211 /* 212 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2 213 * to match kern_setitimer()'s limit to avoid error from it. 214 * 215 * XXX. Linux limit secs to INT_MAX on 32 and does not limit on 64-bit 216 * platforms. 217 */ 218 if (secs > INT32_MAX / 2) 219 secs = INT32_MAX / 2; 220 221 it.it_value.tv_sec = secs; 222 it.it_value.tv_usec = 0; 223 timevalclear(&it.it_interval); 224 error = kern_setitimer(td, ITIMER_REAL, &it, &old_it); 225 KASSERT(error == 0, ("kern_setitimer returns %d", error)); 226 227 if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) || 228 old_it.it_value.tv_usec >= 500000) 229 old_it.it_value.tv_sec++; 230 td->td_retval[0] = old_it.it_value.tv_sec; 231 return (0); 232 } 233 #endif 234 235 int 236 linux_brk(struct thread *td, struct linux_brk_args *args) 237 { 238 struct vmspace *vm = td->td_proc->p_vmspace; 239 uintptr_t new, old; 240 241 old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize); 242 new = (uintptr_t)args->dsend; 243 if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new)) 244 td->td_retval[0] = (register_t)new; 245 else 246 td->td_retval[0] = (register_t)old; 247 248 return (0); 249 } 250 251 #if defined(__i386__) 252 /* XXX: what about amd64/linux32? */ 253 254 int 255 linux_uselib(struct thread *td, struct linux_uselib_args *args) 256 { 257 struct nameidata ni; 258 struct vnode *vp; 259 struct exec *a_out; 260 vm_map_t map; 261 vm_map_entry_t entry; 262 struct vattr attr; 263 vm_offset_t vmaddr; 264 unsigned long file_offset; 265 unsigned long bss_size; 266 char *library; 267 ssize_t aresid; 268 int error; 269 bool locked, opened, textset; 270 271 a_out = NULL; 272 vp = NULL; 273 locked = false; 274 textset = false; 275 opened = false; 276 277 if (!LUSECONVPATH(td)) { 278 NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1, 279 UIO_USERSPACE, args->library); 280 error = namei(&ni); 281 } else { 282 LCONVPATHEXIST(args->library, &library); 283 NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1, 284 UIO_SYSSPACE, library); 285 error = namei(&ni); 286 LFREEPATH(library); 287 } 288 if (error) 289 goto cleanup; 290 291 vp = ni.ni_vp; 292 NDFREE(&ni, NDF_ONLY_PNBUF); 293 294 /* 295 * From here on down, we have a locked vnode that must be unlocked. 296 * XXX: The code below largely duplicates exec_check_permissions(). 297 */ 298 locked = true; 299 300 /* Executable? */ 301 error = VOP_GETATTR(vp, &attr, td->td_ucred); 302 if (error) 303 goto cleanup; 304 305 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 306 ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { 307 /* EACCESS is what exec(2) returns. */ 308 error = ENOEXEC; 309 goto cleanup; 310 } 311 312 /* Sensible size? */ 313 if (attr.va_size == 0) { 314 error = ENOEXEC; 315 goto cleanup; 316 } 317 318 /* Can we access it? */ 319 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 320 if (error) 321 goto cleanup; 322 323 /* 324 * XXX: This should use vn_open() so that it is properly authorized, 325 * and to reduce code redundancy all over the place here. 326 * XXX: Not really, it duplicates far more of exec_check_permissions() 327 * than vn_open(). 328 */ 329 #ifdef MAC 330 error = mac_vnode_check_open(td->td_ucred, vp, VREAD); 331 if (error) 332 goto cleanup; 333 #endif 334 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); 335 if (error) 336 goto cleanup; 337 opened = true; 338 339 /* Pull in executable header into exec_map */ 340 error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE, 341 VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0); 342 if (error) 343 goto cleanup; 344 345 /* Is it a Linux binary ? */ 346 if (((a_out->a_magic >> 16) & 0xff) != 0x64) { 347 error = ENOEXEC; 348 goto cleanup; 349 } 350 351 /* 352 * While we are here, we should REALLY do some more checks 353 */ 354 355 /* Set file/virtual offset based on a.out variant. */ 356 switch ((int)(a_out->a_magic & 0xffff)) { 357 case 0413: /* ZMAGIC */ 358 file_offset = 1024; 359 break; 360 case 0314: /* QMAGIC */ 361 file_offset = 0; 362 break; 363 default: 364 error = ENOEXEC; 365 goto cleanup; 366 } 367 368 bss_size = round_page(a_out->a_bss); 369 370 /* Check various fields in header for validity/bounds. */ 371 if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) { 372 error = ENOEXEC; 373 goto cleanup; 374 } 375 376 /* text + data can't exceed file size */ 377 if (a_out->a_data + a_out->a_text > attr.va_size) { 378 error = EFAULT; 379 goto cleanup; 380 } 381 382 /* 383 * text/data/bss must not exceed limits 384 * XXX - this is not complete. it should check current usage PLUS 385 * the resources needed by this library. 386 */ 387 PROC_LOCK(td->td_proc); 388 if (a_out->a_text > maxtsiz || 389 a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) || 390 racct_set(td->td_proc, RACCT_DATA, a_out->a_data + 391 bss_size) != 0) { 392 PROC_UNLOCK(td->td_proc); 393 error = ENOMEM; 394 goto cleanup; 395 } 396 PROC_UNLOCK(td->td_proc); 397 398 /* 399 * Prevent more writers. 400 */ 401 error = VOP_SET_TEXT(vp); 402 if (error != 0) 403 goto cleanup; 404 textset = true; 405 406 /* 407 * Lock no longer needed 408 */ 409 locked = false; 410 VOP_UNLOCK(vp); 411 412 /* 413 * Check if file_offset page aligned. Currently we cannot handle 414 * misalinged file offsets, and so we read in the entire image 415 * (what a waste). 416 */ 417 if (file_offset & PAGE_MASK) { 418 /* Map text+data read/write/execute */ 419 420 /* a_entry is the load address and is page aligned */ 421 vmaddr = trunc_page(a_out->a_entry); 422 423 /* get anon user mapping, read+write+execute */ 424 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 425 &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE, 426 VM_PROT_ALL, VM_PROT_ALL, 0); 427 if (error) 428 goto cleanup; 429 430 error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset, 431 a_out->a_text + a_out->a_data, UIO_USERSPACE, 0, 432 td->td_ucred, NOCRED, &aresid, td); 433 if (error != 0) 434 goto cleanup; 435 if (aresid != 0) { 436 error = ENOEXEC; 437 goto cleanup; 438 } 439 } else { 440 /* 441 * for QMAGIC, a_entry is 20 bytes beyond the load address 442 * to skip the executable header 443 */ 444 vmaddr = trunc_page(a_out->a_entry); 445 446 /* 447 * Map it all into the process's space as a single 448 * copy-on-write "data" segment. 449 */ 450 map = &td->td_proc->p_vmspace->vm_map; 451 error = vm_mmap(map, &vmaddr, 452 a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, 453 MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset); 454 if (error) 455 goto cleanup; 456 vm_map_lock(map); 457 if (!vm_map_lookup_entry(map, vmaddr, &entry)) { 458 vm_map_unlock(map); 459 error = EDOOFUS; 460 goto cleanup; 461 } 462 entry->eflags |= MAP_ENTRY_VN_EXEC; 463 vm_map_unlock(map); 464 textset = false; 465 } 466 467 if (bss_size != 0) { 468 /* Calculate BSS start address */ 469 vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + 470 a_out->a_data; 471 472 /* allocate some 'anon' space */ 473 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 474 &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL, 475 VM_PROT_ALL, 0); 476 if (error) 477 goto cleanup; 478 } 479 480 cleanup: 481 if (opened) { 482 if (locked) 483 VOP_UNLOCK(vp); 484 locked = false; 485 VOP_CLOSE(vp, FREAD, td->td_ucred, td); 486 } 487 if (textset) { 488 if (!locked) { 489 locked = true; 490 VOP_LOCK(vp, LK_SHARED | LK_RETRY); 491 } 492 VOP_UNSET_TEXT_CHECKED(vp); 493 } 494 if (locked) 495 VOP_UNLOCK(vp); 496 497 /* Release the temporary mapping. */ 498 if (a_out) 499 kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE); 500 501 return (error); 502 } 503 504 #endif /* __i386__ */ 505 506 #ifdef LINUX_LEGACY_SYSCALLS 507 int 508 linux_select(struct thread *td, struct linux_select_args *args) 509 { 510 l_timeval ltv; 511 struct timeval tv0, tv1, utv, *tvp; 512 int error; 513 514 /* 515 * Store current time for computation of the amount of 516 * time left. 517 */ 518 if (args->timeout) { 519 if ((error = copyin(args->timeout, <v, sizeof(ltv)))) 520 goto select_out; 521 utv.tv_sec = ltv.tv_sec; 522 utv.tv_usec = ltv.tv_usec; 523 524 if (itimerfix(&utv)) { 525 /* 526 * The timeval was invalid. Convert it to something 527 * valid that will act as it does under Linux. 528 */ 529 utv.tv_sec += utv.tv_usec / 1000000; 530 utv.tv_usec %= 1000000; 531 if (utv.tv_usec < 0) { 532 utv.tv_sec -= 1; 533 utv.tv_usec += 1000000; 534 } 535 if (utv.tv_sec < 0) 536 timevalclear(&utv); 537 } 538 microtime(&tv0); 539 tvp = &utv; 540 } else 541 tvp = NULL; 542 543 error = kern_select(td, args->nfds, args->readfds, args->writefds, 544 args->exceptfds, tvp, LINUX_NFDBITS); 545 if (error) 546 goto select_out; 547 548 if (args->timeout) { 549 if (td->td_retval[0]) { 550 /* 551 * Compute how much time was left of the timeout, 552 * by subtracting the current time and the time 553 * before we started the call, and subtracting 554 * that result from the user-supplied value. 555 */ 556 microtime(&tv1); 557 timevalsub(&tv1, &tv0); 558 timevalsub(&utv, &tv1); 559 if (utv.tv_sec < 0) 560 timevalclear(&utv); 561 } else 562 timevalclear(&utv); 563 ltv.tv_sec = utv.tv_sec; 564 ltv.tv_usec = utv.tv_usec; 565 if ((error = copyout(<v, args->timeout, sizeof(ltv)))) 566 goto select_out; 567 } 568 569 select_out: 570 return (error); 571 } 572 #endif 573 574 int 575 linux_mremap(struct thread *td, struct linux_mremap_args *args) 576 { 577 uintptr_t addr; 578 size_t len; 579 int error = 0; 580 581 if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) { 582 td->td_retval[0] = 0; 583 return (EINVAL); 584 } 585 586 /* 587 * Check for the page alignment. 588 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK. 589 */ 590 if (args->addr & PAGE_MASK) { 591 td->td_retval[0] = 0; 592 return (EINVAL); 593 } 594 595 args->new_len = round_page(args->new_len); 596 args->old_len = round_page(args->old_len); 597 598 if (args->new_len > args->old_len) { 599 td->td_retval[0] = 0; 600 return (ENOMEM); 601 } 602 603 if (args->new_len < args->old_len) { 604 addr = args->addr + args->new_len; 605 len = args->old_len - args->new_len; 606 error = kern_munmap(td, addr, len); 607 } 608 609 td->td_retval[0] = error ? 0 : (uintptr_t)args->addr; 610 return (error); 611 } 612 613 #define LINUX_MS_ASYNC 0x0001 614 #define LINUX_MS_INVALIDATE 0x0002 615 #define LINUX_MS_SYNC 0x0004 616 617 int 618 linux_msync(struct thread *td, struct linux_msync_args *args) 619 { 620 621 return (kern_msync(td, args->addr, args->len, 622 args->fl & ~LINUX_MS_SYNC)); 623 } 624 625 #ifdef LINUX_LEGACY_SYSCALLS 626 int 627 linux_time(struct thread *td, struct linux_time_args *args) 628 { 629 struct timeval tv; 630 l_time_t tm; 631 int error; 632 633 microtime(&tv); 634 tm = tv.tv_sec; 635 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 636 return (error); 637 td->td_retval[0] = tm; 638 return (0); 639 } 640 #endif 641 642 struct l_times_argv { 643 l_clock_t tms_utime; 644 l_clock_t tms_stime; 645 l_clock_t tms_cutime; 646 l_clock_t tms_cstime; 647 }; 648 649 /* 650 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value. 651 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK 652 * auxiliary vector entry. 653 */ 654 #define CLK_TCK 100 655 656 #define CONVOTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 657 #define CONVNTCK(r) (r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz)) 658 659 #define CONVTCK(r) (linux_kernver(td) >= LINUX_KERNVER_2004000 ? \ 660 CONVNTCK(r) : CONVOTCK(r)) 661 662 int 663 linux_times(struct thread *td, struct linux_times_args *args) 664 { 665 struct timeval tv, utime, stime, cutime, cstime; 666 struct l_times_argv tms; 667 struct proc *p; 668 int error; 669 670 if (args->buf != NULL) { 671 p = td->td_proc; 672 PROC_LOCK(p); 673 PROC_STATLOCK(p); 674 calcru(p, &utime, &stime); 675 PROC_STATUNLOCK(p); 676 calccru(p, &cutime, &cstime); 677 PROC_UNLOCK(p); 678 679 tms.tms_utime = CONVTCK(utime); 680 tms.tms_stime = CONVTCK(stime); 681 682 tms.tms_cutime = CONVTCK(cutime); 683 tms.tms_cstime = CONVTCK(cstime); 684 685 if ((error = copyout(&tms, args->buf, sizeof(tms)))) 686 return (error); 687 } 688 689 microuptime(&tv); 690 td->td_retval[0] = (int)CONVTCK(tv); 691 return (0); 692 } 693 694 int 695 linux_newuname(struct thread *td, struct linux_newuname_args *args) 696 { 697 struct l_new_utsname utsname; 698 char osname[LINUX_MAX_UTSNAME]; 699 char osrelease[LINUX_MAX_UTSNAME]; 700 char *p; 701 702 linux_get_osname(td, osname); 703 linux_get_osrelease(td, osrelease); 704 705 bzero(&utsname, sizeof(utsname)); 706 strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME); 707 getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME); 708 getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME); 709 strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME); 710 strlcpy(utsname.version, version, LINUX_MAX_UTSNAME); 711 for (p = utsname.version; *p != '\0'; ++p) 712 if (*p == '\n') { 713 *p = '\0'; 714 break; 715 } 716 #if defined(__amd64__) 717 /* 718 * On amd64, Linux uname(2) needs to return "x86_64" 719 * for both 64-bit and 32-bit applications. On 32-bit, 720 * the string returned by getauxval(AT_PLATFORM) needs 721 * to remain "i686", though. 722 */ 723 strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME); 724 #elif defined(__aarch64__) 725 strlcpy(utsname.machine, "aarch64", LINUX_MAX_UTSNAME); 726 #elif defined(__i386__) 727 strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME); 728 #endif 729 730 return (copyout(&utsname, args->buf, sizeof(utsname))); 731 } 732 733 struct l_utimbuf { 734 l_time_t l_actime; 735 l_time_t l_modtime; 736 }; 737 738 #ifdef LINUX_LEGACY_SYSCALLS 739 int 740 linux_utime(struct thread *td, struct linux_utime_args *args) 741 { 742 struct timeval tv[2], *tvp; 743 struct l_utimbuf lut; 744 char *fname; 745 int error; 746 747 if (args->times) { 748 if ((error = copyin(args->times, &lut, sizeof lut)) != 0) 749 return (error); 750 tv[0].tv_sec = lut.l_actime; 751 tv[0].tv_usec = 0; 752 tv[1].tv_sec = lut.l_modtime; 753 tv[1].tv_usec = 0; 754 tvp = tv; 755 } else 756 tvp = NULL; 757 758 if (!LUSECONVPATH(td)) { 759 error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE, 760 tvp, UIO_SYSSPACE); 761 } else { 762 LCONVPATHEXIST(args->fname, &fname); 763 error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp, 764 UIO_SYSSPACE); 765 LFREEPATH(fname); 766 } 767 return (error); 768 } 769 #endif 770 771 #ifdef LINUX_LEGACY_SYSCALLS 772 int 773 linux_utimes(struct thread *td, struct linux_utimes_args *args) 774 { 775 l_timeval ltv[2]; 776 struct timeval tv[2], *tvp = NULL; 777 char *fname; 778 int error; 779 780 if (args->tptr != NULL) { 781 if ((error = copyin(args->tptr, ltv, sizeof ltv)) != 0) 782 return (error); 783 tv[0].tv_sec = ltv[0].tv_sec; 784 tv[0].tv_usec = ltv[0].tv_usec; 785 tv[1].tv_sec = ltv[1].tv_sec; 786 tv[1].tv_usec = ltv[1].tv_usec; 787 tvp = tv; 788 } 789 790 if (!LUSECONVPATH(td)) { 791 error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE, 792 tvp, UIO_SYSSPACE); 793 } else { 794 LCONVPATHEXIST(args->fname, &fname); 795 error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, 796 tvp, UIO_SYSSPACE); 797 LFREEPATH(fname); 798 } 799 return (error); 800 } 801 #endif 802 803 static int 804 linux_utimensat_lts_to_ts(struct l_timespec *l_times, struct timespec *times) 805 { 806 807 if (l_times->tv_nsec != LINUX_UTIME_OMIT && 808 l_times->tv_nsec != LINUX_UTIME_NOW && 809 (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999)) 810 return (EINVAL); 811 812 times->tv_sec = l_times->tv_sec; 813 switch (l_times->tv_nsec) 814 { 815 case LINUX_UTIME_OMIT: 816 times->tv_nsec = UTIME_OMIT; 817 break; 818 case LINUX_UTIME_NOW: 819 times->tv_nsec = UTIME_NOW; 820 break; 821 default: 822 times->tv_nsec = l_times->tv_nsec; 823 } 824 825 return (0); 826 } 827 828 static int 829 linux_common_utimensat(struct thread *td, int ldfd, const char *pathname, 830 struct timespec *timesp, int lflags) 831 { 832 char *path = NULL; 833 int error, dfd, flags = 0; 834 835 dfd = (ldfd == LINUX_AT_FDCWD) ? AT_FDCWD : ldfd; 836 837 if (lflags & ~(LINUX_AT_SYMLINK_NOFOLLOW | LINUX_AT_EMPTY_PATH)) 838 return (EINVAL); 839 840 if (timesp != NULL) { 841 /* This breaks POSIX, but is what the Linux kernel does 842 * _on purpose_ (documented in the man page for utimensat(2)), 843 * so we must follow that behaviour. */ 844 if (timesp[0].tv_nsec == UTIME_OMIT && 845 timesp[1].tv_nsec == UTIME_OMIT) 846 return (0); 847 } 848 849 if (lflags & LINUX_AT_SYMLINK_NOFOLLOW) 850 flags |= AT_SYMLINK_NOFOLLOW; 851 if (lflags & LINUX_AT_EMPTY_PATH) 852 flags |= AT_EMPTY_PATH; 853 854 if (!LUSECONVPATH(td)) { 855 if (pathname != NULL) { 856 return (kern_utimensat(td, dfd, pathname, 857 UIO_USERSPACE, timesp, UIO_SYSSPACE, flags)); 858 } 859 } 860 861 if (pathname != NULL) 862 LCONVPATHEXIST_AT(pathname, &path, dfd); 863 else if (lflags != 0) 864 return (EINVAL); 865 866 if (path == NULL) 867 error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE); 868 else { 869 error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp, 870 UIO_SYSSPACE, flags); 871 LFREEPATH(path); 872 } 873 874 return (error); 875 } 876 877 int 878 linux_utimensat(struct thread *td, struct linux_utimensat_args *args) 879 { 880 struct l_timespec l_times[2]; 881 struct timespec times[2], *timesp; 882 int error; 883 884 if (args->times != NULL) { 885 error = copyin(args->times, l_times, sizeof(l_times)); 886 if (error != 0) 887 return (error); 888 889 error = linux_utimensat_lts_to_ts(&l_times[0], ×[0]); 890 if (error != 0) 891 return (error); 892 error = linux_utimensat_lts_to_ts(&l_times[1], ×[1]); 893 if (error != 0) 894 return (error); 895 timesp = times; 896 } else 897 timesp = NULL; 898 899 return (linux_common_utimensat(td, args->dfd, args->pathname, 900 timesp, args->flags)); 901 } 902 903 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 904 static int 905 linux_utimensat_lts64_to_ts(struct l_timespec64 *l_times, struct timespec *times) 906 { 907 908 if (l_times->tv_nsec != LINUX_UTIME_OMIT && 909 l_times->tv_nsec != LINUX_UTIME_NOW && 910 (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999)) 911 return (EINVAL); 912 913 times->tv_sec = l_times->tv_sec; 914 switch (l_times->tv_nsec) 915 { 916 case LINUX_UTIME_OMIT: 917 times->tv_nsec = UTIME_OMIT; 918 break; 919 case LINUX_UTIME_NOW: 920 times->tv_nsec = UTIME_NOW; 921 break; 922 default: 923 times->tv_nsec = l_times->tv_nsec; 924 } 925 926 return (0); 927 } 928 929 int 930 linux_utimensat_time64(struct thread *td, struct linux_utimensat_time64_args *args) 931 { 932 struct l_timespec64 l_times[2]; 933 struct timespec times[2], *timesp; 934 int error; 935 936 if (args->times64 != NULL) { 937 error = copyin(args->times64, l_times, sizeof(l_times)); 938 if (error != 0) 939 return (error); 940 941 error = linux_utimensat_lts64_to_ts(&l_times[0], ×[0]); 942 if (error != 0) 943 return (error); 944 error = linux_utimensat_lts64_to_ts(&l_times[1], ×[1]); 945 if (error != 0) 946 return (error); 947 timesp = times; 948 } else 949 timesp = NULL; 950 951 return (linux_common_utimensat(td, args->dfd, args->pathname, 952 timesp, args->flags)); 953 } 954 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 955 956 #ifdef LINUX_LEGACY_SYSCALLS 957 int 958 linux_futimesat(struct thread *td, struct linux_futimesat_args *args) 959 { 960 l_timeval ltv[2]; 961 struct timeval tv[2], *tvp = NULL; 962 char *fname; 963 int error, dfd; 964 965 dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; 966 967 if (args->utimes != NULL) { 968 if ((error = copyin(args->utimes, ltv, sizeof ltv)) != 0) 969 return (error); 970 tv[0].tv_sec = ltv[0].tv_sec; 971 tv[0].tv_usec = ltv[0].tv_usec; 972 tv[1].tv_sec = ltv[1].tv_sec; 973 tv[1].tv_usec = ltv[1].tv_usec; 974 tvp = tv; 975 } 976 977 if (!LUSECONVPATH(td)) { 978 error = kern_utimesat(td, dfd, args->filename, UIO_USERSPACE, 979 tvp, UIO_SYSSPACE); 980 } else { 981 LCONVPATHEXIST_AT(args->filename, &fname, dfd); 982 error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, 983 tvp, UIO_SYSSPACE); 984 LFREEPATH(fname); 985 } 986 return (error); 987 } 988 #endif 989 990 static int 991 linux_common_wait(struct thread *td, int pid, int *statusp, 992 int options, struct __wrusage *wrup) 993 { 994 siginfo_t siginfo; 995 idtype_t idtype; 996 id_t id; 997 int error, status, tmpstat; 998 999 if (pid == WAIT_ANY) { 1000 idtype = P_ALL; 1001 id = 0; 1002 } else if (pid < 0) { 1003 idtype = P_PGID; 1004 id = (id_t)-pid; 1005 } else { 1006 idtype = P_PID; 1007 id = (id_t)pid; 1008 } 1009 1010 /* 1011 * For backward compatibility we implicitly add flags WEXITED 1012 * and WTRAPPED here. 1013 */ 1014 options |= WEXITED | WTRAPPED; 1015 error = kern_wait6(td, idtype, id, &status, options, wrup, &siginfo); 1016 if (error) 1017 return (error); 1018 1019 if (statusp) { 1020 tmpstat = status & 0xffff; 1021 if (WIFSIGNALED(tmpstat)) { 1022 tmpstat = (tmpstat & 0xffffff80) | 1023 bsd_to_linux_signal(WTERMSIG(tmpstat)); 1024 } else if (WIFSTOPPED(tmpstat)) { 1025 tmpstat = (tmpstat & 0xffff00ff) | 1026 (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8); 1027 #if defined(__aarch64__) || (defined(__amd64__) && !defined(COMPAT_LINUX32)) 1028 if (WSTOPSIG(status) == SIGTRAP) { 1029 tmpstat = linux_ptrace_status(td, 1030 siginfo.si_pid, tmpstat); 1031 } 1032 #endif 1033 } else if (WIFCONTINUED(tmpstat)) { 1034 tmpstat = 0xffff; 1035 } 1036 error = copyout(&tmpstat, statusp, sizeof(int)); 1037 } 1038 1039 return (error); 1040 } 1041 1042 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1043 int 1044 linux_waitpid(struct thread *td, struct linux_waitpid_args *args) 1045 { 1046 struct linux_wait4_args wait4_args; 1047 1048 wait4_args.pid = args->pid; 1049 wait4_args.status = args->status; 1050 wait4_args.options = args->options; 1051 wait4_args.rusage = NULL; 1052 1053 return (linux_wait4(td, &wait4_args)); 1054 } 1055 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1056 1057 int 1058 linux_wait4(struct thread *td, struct linux_wait4_args *args) 1059 { 1060 int error, options; 1061 struct __wrusage wru, *wrup; 1062 1063 if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG | 1064 LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL)) 1065 return (EINVAL); 1066 1067 options = WEXITED; 1068 linux_to_bsd_waitopts(args->options, &options); 1069 1070 if (args->rusage != NULL) 1071 wrup = &wru; 1072 else 1073 wrup = NULL; 1074 error = linux_common_wait(td, args->pid, args->status, options, wrup); 1075 if (error != 0) 1076 return (error); 1077 if (args->rusage != NULL) 1078 error = linux_copyout_rusage(&wru.wru_self, args->rusage); 1079 return (error); 1080 } 1081 1082 int 1083 linux_waitid(struct thread *td, struct linux_waitid_args *args) 1084 { 1085 int status, options, sig; 1086 struct __wrusage wru; 1087 siginfo_t siginfo; 1088 l_siginfo_t lsi; 1089 idtype_t idtype; 1090 int error; 1091 1092 options = 0; 1093 linux_to_bsd_waitopts(args->options, &options); 1094 1095 if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED)) 1096 return (EINVAL); 1097 if (!(options & (WEXITED | WUNTRACED | WCONTINUED))) 1098 return (EINVAL); 1099 1100 switch (args->idtype) { 1101 case LINUX_P_ALL: 1102 idtype = P_ALL; 1103 break; 1104 case LINUX_P_PID: 1105 if (args->id <= 0) 1106 return (EINVAL); 1107 idtype = P_PID; 1108 break; 1109 case LINUX_P_PGID: 1110 if (args->id <= 0) 1111 return (EINVAL); 1112 idtype = P_PGID; 1113 break; 1114 default: 1115 return (EINVAL); 1116 } 1117 1118 error = kern_wait6(td, idtype, args->id, &status, options, 1119 &wru, &siginfo); 1120 if (error != 0) 1121 return (error); 1122 if (args->rusage != NULL) { 1123 error = linux_copyout_rusage(&wru.wru_children, 1124 args->rusage); 1125 if (error != 0) 1126 return (error); 1127 } 1128 if (args->info != NULL) { 1129 bzero(&lsi, sizeof(lsi)); 1130 if (td->td_retval[0] != 0) { 1131 sig = bsd_to_linux_signal(siginfo.si_signo); 1132 siginfo_to_lsiginfo(&siginfo, &lsi, sig); 1133 } 1134 error = copyout(&lsi, args->info, sizeof(lsi)); 1135 } 1136 td->td_retval[0] = 0; 1137 1138 return (error); 1139 } 1140 1141 #ifdef LINUX_LEGACY_SYSCALLS 1142 int 1143 linux_mknod(struct thread *td, struct linux_mknod_args *args) 1144 { 1145 char *path; 1146 int error; 1147 enum uio_seg seg; 1148 bool convpath; 1149 1150 convpath = LUSECONVPATH(td); 1151 if (!convpath) { 1152 path = args->path; 1153 seg = UIO_USERSPACE; 1154 } else { 1155 LCONVPATHCREAT(args->path, &path); 1156 seg = UIO_SYSSPACE; 1157 } 1158 1159 switch (args->mode & S_IFMT) { 1160 case S_IFIFO: 1161 case S_IFSOCK: 1162 error = kern_mkfifoat(td, AT_FDCWD, path, seg, 1163 args->mode); 1164 break; 1165 1166 case S_IFCHR: 1167 case S_IFBLK: 1168 error = kern_mknodat(td, AT_FDCWD, path, seg, 1169 args->mode, args->dev); 1170 break; 1171 1172 case S_IFDIR: 1173 error = EPERM; 1174 break; 1175 1176 case 0: 1177 args->mode |= S_IFREG; 1178 /* FALLTHROUGH */ 1179 case S_IFREG: 1180 error = kern_openat(td, AT_FDCWD, path, seg, 1181 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 1182 if (error == 0) 1183 kern_close(td, td->td_retval[0]); 1184 break; 1185 1186 default: 1187 error = EINVAL; 1188 break; 1189 } 1190 if (convpath) 1191 LFREEPATH(path); 1192 return (error); 1193 } 1194 #endif 1195 1196 int 1197 linux_mknodat(struct thread *td, struct linux_mknodat_args *args) 1198 { 1199 char *path; 1200 int error, dfd; 1201 enum uio_seg seg; 1202 bool convpath; 1203 1204 dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; 1205 1206 convpath = LUSECONVPATH(td); 1207 if (!convpath) { 1208 path = __DECONST(char *, args->filename); 1209 seg = UIO_USERSPACE; 1210 } else { 1211 LCONVPATHCREAT_AT(args->filename, &path, dfd); 1212 seg = UIO_SYSSPACE; 1213 } 1214 1215 switch (args->mode & S_IFMT) { 1216 case S_IFIFO: 1217 case S_IFSOCK: 1218 error = kern_mkfifoat(td, dfd, path, seg, args->mode); 1219 break; 1220 1221 case S_IFCHR: 1222 case S_IFBLK: 1223 error = kern_mknodat(td, dfd, path, seg, args->mode, 1224 args->dev); 1225 break; 1226 1227 case S_IFDIR: 1228 error = EPERM; 1229 break; 1230 1231 case 0: 1232 args->mode |= S_IFREG; 1233 /* FALLTHROUGH */ 1234 case S_IFREG: 1235 error = kern_openat(td, dfd, path, seg, 1236 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 1237 if (error == 0) 1238 kern_close(td, td->td_retval[0]); 1239 break; 1240 1241 default: 1242 error = EINVAL; 1243 break; 1244 } 1245 if (convpath) 1246 LFREEPATH(path); 1247 return (error); 1248 } 1249 1250 /* 1251 * UGH! This is just about the dumbest idea I've ever heard!! 1252 */ 1253 int 1254 linux_personality(struct thread *td, struct linux_personality_args *args) 1255 { 1256 struct linux_pemuldata *pem; 1257 struct proc *p = td->td_proc; 1258 uint32_t old; 1259 1260 PROC_LOCK(p); 1261 pem = pem_find(p); 1262 old = pem->persona; 1263 if (args->per != 0xffffffff) 1264 pem->persona = args->per; 1265 PROC_UNLOCK(p); 1266 1267 td->td_retval[0] = old; 1268 return (0); 1269 } 1270 1271 struct l_itimerval { 1272 l_timeval it_interval; 1273 l_timeval it_value; 1274 }; 1275 1276 #define B2L_ITIMERVAL(bip, lip) \ 1277 (bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec; \ 1278 (bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec; \ 1279 (bip)->it_value.tv_sec = (lip)->it_value.tv_sec; \ 1280 (bip)->it_value.tv_usec = (lip)->it_value.tv_usec; 1281 1282 int 1283 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap) 1284 { 1285 int error; 1286 struct l_itimerval ls; 1287 struct itimerval aitv, oitv; 1288 1289 if (uap->itv == NULL) { 1290 uap->itv = uap->oitv; 1291 return (linux_getitimer(td, (struct linux_getitimer_args *)uap)); 1292 } 1293 1294 error = copyin(uap->itv, &ls, sizeof(ls)); 1295 if (error != 0) 1296 return (error); 1297 B2L_ITIMERVAL(&aitv, &ls); 1298 error = kern_setitimer(td, uap->which, &aitv, &oitv); 1299 if (error != 0 || uap->oitv == NULL) 1300 return (error); 1301 B2L_ITIMERVAL(&ls, &oitv); 1302 1303 return (copyout(&ls, uap->oitv, sizeof(ls))); 1304 } 1305 1306 int 1307 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap) 1308 { 1309 int error; 1310 struct l_itimerval ls; 1311 struct itimerval aitv; 1312 1313 error = kern_getitimer(td, uap->which, &aitv); 1314 if (error != 0) 1315 return (error); 1316 B2L_ITIMERVAL(&ls, &aitv); 1317 return (copyout(&ls, uap->itv, sizeof(ls))); 1318 } 1319 1320 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1321 int 1322 linux_nice(struct thread *td, struct linux_nice_args *args) 1323 { 1324 1325 return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc)); 1326 } 1327 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1328 1329 int 1330 linux_setgroups(struct thread *td, struct linux_setgroups_args *args) 1331 { 1332 struct ucred *newcred, *oldcred; 1333 l_gid_t *linux_gidset; 1334 gid_t *bsd_gidset; 1335 int ngrp, error; 1336 struct proc *p; 1337 1338 ngrp = args->gidsetsize; 1339 if (ngrp < 0 || ngrp >= ngroups_max + 1) 1340 return (EINVAL); 1341 linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK); 1342 error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t)); 1343 if (error) 1344 goto out; 1345 newcred = crget(); 1346 crextend(newcred, ngrp + 1); 1347 p = td->td_proc; 1348 PROC_LOCK(p); 1349 oldcred = p->p_ucred; 1350 crcopy(newcred, oldcred); 1351 1352 /* 1353 * cr_groups[0] holds egid. Setting the whole set from 1354 * the supplied set will cause egid to be changed too. 1355 * Keep cr_groups[0] unchanged to prevent that. 1356 */ 1357 1358 if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) { 1359 PROC_UNLOCK(p); 1360 crfree(newcred); 1361 goto out; 1362 } 1363 1364 if (ngrp > 0) { 1365 newcred->cr_ngroups = ngrp + 1; 1366 1367 bsd_gidset = newcred->cr_groups; 1368 ngrp--; 1369 while (ngrp >= 0) { 1370 bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; 1371 ngrp--; 1372 } 1373 } else 1374 newcred->cr_ngroups = 1; 1375 1376 setsugid(p); 1377 proc_set_cred(p, newcred); 1378 PROC_UNLOCK(p); 1379 crfree(oldcred); 1380 error = 0; 1381 out: 1382 free(linux_gidset, M_LINUX); 1383 return (error); 1384 } 1385 1386 int 1387 linux_getgroups(struct thread *td, struct linux_getgroups_args *args) 1388 { 1389 struct ucred *cred; 1390 l_gid_t *linux_gidset; 1391 gid_t *bsd_gidset; 1392 int bsd_gidsetsz, ngrp, error; 1393 1394 cred = td->td_ucred; 1395 bsd_gidset = cred->cr_groups; 1396 bsd_gidsetsz = cred->cr_ngroups - 1; 1397 1398 /* 1399 * cr_groups[0] holds egid. Returning the whole set 1400 * here will cause a duplicate. Exclude cr_groups[0] 1401 * to prevent that. 1402 */ 1403 1404 if ((ngrp = args->gidsetsize) == 0) { 1405 td->td_retval[0] = bsd_gidsetsz; 1406 return (0); 1407 } 1408 1409 if (ngrp < bsd_gidsetsz) 1410 return (EINVAL); 1411 1412 ngrp = 0; 1413 linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset), 1414 M_LINUX, M_WAITOK); 1415 while (ngrp < bsd_gidsetsz) { 1416 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1417 ngrp++; 1418 } 1419 1420 error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t)); 1421 free(linux_gidset, M_LINUX); 1422 if (error) 1423 return (error); 1424 1425 td->td_retval[0] = ngrp; 1426 return (0); 1427 } 1428 1429 static bool 1430 linux_get_dummy_limit(l_uint resource, struct rlimit *rlim) 1431 { 1432 1433 if (linux_dummy_rlimits == 0) 1434 return (false); 1435 1436 switch (resource) { 1437 case LINUX_RLIMIT_LOCKS: 1438 case LINUX_RLIMIT_SIGPENDING: 1439 case LINUX_RLIMIT_MSGQUEUE: 1440 case LINUX_RLIMIT_RTTIME: 1441 rlim->rlim_cur = LINUX_RLIM_INFINITY; 1442 rlim->rlim_max = LINUX_RLIM_INFINITY; 1443 return (true); 1444 case LINUX_RLIMIT_NICE: 1445 case LINUX_RLIMIT_RTPRIO: 1446 rlim->rlim_cur = 0; 1447 rlim->rlim_max = 0; 1448 return (true); 1449 default: 1450 return (false); 1451 } 1452 } 1453 1454 int 1455 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1456 { 1457 struct rlimit bsd_rlim; 1458 struct l_rlimit rlim; 1459 u_int which; 1460 int error; 1461 1462 if (args->resource >= LINUX_RLIM_NLIMITS) 1463 return (EINVAL); 1464 1465 which = linux_to_bsd_resource[args->resource]; 1466 if (which == -1) 1467 return (EINVAL); 1468 1469 error = copyin(args->rlim, &rlim, sizeof(rlim)); 1470 if (error) 1471 return (error); 1472 1473 bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; 1474 bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; 1475 return (kern_setrlimit(td, which, &bsd_rlim)); 1476 } 1477 1478 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1479 int 1480 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1481 { 1482 struct l_rlimit rlim; 1483 struct rlimit bsd_rlim; 1484 u_int which; 1485 1486 if (linux_get_dummy_limit(args->resource, &bsd_rlim)) { 1487 rlim.rlim_cur = bsd_rlim.rlim_cur; 1488 rlim.rlim_max = bsd_rlim.rlim_max; 1489 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1490 } 1491 1492 if (args->resource >= LINUX_RLIM_NLIMITS) 1493 return (EINVAL); 1494 1495 which = linux_to_bsd_resource[args->resource]; 1496 if (which == -1) 1497 return (EINVAL); 1498 1499 lim_rlimit(td, which, &bsd_rlim); 1500 1501 #ifdef COMPAT_LINUX32 1502 rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur; 1503 if (rlim.rlim_cur == UINT_MAX) 1504 rlim.rlim_cur = INT_MAX; 1505 rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max; 1506 if (rlim.rlim_max == UINT_MAX) 1507 rlim.rlim_max = INT_MAX; 1508 #else 1509 rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; 1510 if (rlim.rlim_cur == ULONG_MAX) 1511 rlim.rlim_cur = LONG_MAX; 1512 rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; 1513 if (rlim.rlim_max == ULONG_MAX) 1514 rlim.rlim_max = LONG_MAX; 1515 #endif 1516 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1517 } 1518 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1519 1520 int 1521 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) 1522 { 1523 struct l_rlimit rlim; 1524 struct rlimit bsd_rlim; 1525 u_int which; 1526 1527 if (linux_get_dummy_limit(args->resource, &bsd_rlim)) { 1528 rlim.rlim_cur = bsd_rlim.rlim_cur; 1529 rlim.rlim_max = bsd_rlim.rlim_max; 1530 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1531 } 1532 1533 if (args->resource >= LINUX_RLIM_NLIMITS) 1534 return (EINVAL); 1535 1536 which = linux_to_bsd_resource[args->resource]; 1537 if (which == -1) 1538 return (EINVAL); 1539 1540 lim_rlimit(td, which, &bsd_rlim); 1541 1542 rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur; 1543 rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max; 1544 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1545 } 1546 1547 int 1548 linux_sched_setscheduler(struct thread *td, 1549 struct linux_sched_setscheduler_args *args) 1550 { 1551 struct sched_param sched_param; 1552 struct thread *tdt; 1553 int error, policy; 1554 1555 switch (args->policy) { 1556 case LINUX_SCHED_OTHER: 1557 policy = SCHED_OTHER; 1558 break; 1559 case LINUX_SCHED_FIFO: 1560 policy = SCHED_FIFO; 1561 break; 1562 case LINUX_SCHED_RR: 1563 policy = SCHED_RR; 1564 break; 1565 default: 1566 return (EINVAL); 1567 } 1568 1569 error = copyin(args->param, &sched_param, sizeof(sched_param)); 1570 if (error) 1571 return (error); 1572 1573 if (linux_map_sched_prio) { 1574 switch (policy) { 1575 case SCHED_OTHER: 1576 if (sched_param.sched_priority != 0) 1577 return (EINVAL); 1578 1579 sched_param.sched_priority = 1580 PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE; 1581 break; 1582 case SCHED_FIFO: 1583 case SCHED_RR: 1584 if (sched_param.sched_priority < 1 || 1585 sched_param.sched_priority >= LINUX_MAX_RT_PRIO) 1586 return (EINVAL); 1587 1588 /* 1589 * Map [1, LINUX_MAX_RT_PRIO - 1] to 1590 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down). 1591 */ 1592 sched_param.sched_priority = 1593 (sched_param.sched_priority - 1) * 1594 (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) / 1595 (LINUX_MAX_RT_PRIO - 1); 1596 break; 1597 } 1598 } 1599 1600 tdt = linux_tdfind(td, args->pid, -1); 1601 if (tdt == NULL) 1602 return (ESRCH); 1603 1604 error = kern_sched_setscheduler(td, tdt, policy, &sched_param); 1605 PROC_UNLOCK(tdt->td_proc); 1606 return (error); 1607 } 1608 1609 int 1610 linux_sched_getscheduler(struct thread *td, 1611 struct linux_sched_getscheduler_args *args) 1612 { 1613 struct thread *tdt; 1614 int error, policy; 1615 1616 tdt = linux_tdfind(td, args->pid, -1); 1617 if (tdt == NULL) 1618 return (ESRCH); 1619 1620 error = kern_sched_getscheduler(td, tdt, &policy); 1621 PROC_UNLOCK(tdt->td_proc); 1622 1623 switch (policy) { 1624 case SCHED_OTHER: 1625 td->td_retval[0] = LINUX_SCHED_OTHER; 1626 break; 1627 case SCHED_FIFO: 1628 td->td_retval[0] = LINUX_SCHED_FIFO; 1629 break; 1630 case SCHED_RR: 1631 td->td_retval[0] = LINUX_SCHED_RR; 1632 break; 1633 } 1634 return (error); 1635 } 1636 1637 int 1638 linux_sched_get_priority_max(struct thread *td, 1639 struct linux_sched_get_priority_max_args *args) 1640 { 1641 struct sched_get_priority_max_args bsd; 1642 1643 if (linux_map_sched_prio) { 1644 switch (args->policy) { 1645 case LINUX_SCHED_OTHER: 1646 td->td_retval[0] = 0; 1647 return (0); 1648 case LINUX_SCHED_FIFO: 1649 case LINUX_SCHED_RR: 1650 td->td_retval[0] = LINUX_MAX_RT_PRIO - 1; 1651 return (0); 1652 default: 1653 return (EINVAL); 1654 } 1655 } 1656 1657 switch (args->policy) { 1658 case LINUX_SCHED_OTHER: 1659 bsd.policy = SCHED_OTHER; 1660 break; 1661 case LINUX_SCHED_FIFO: 1662 bsd.policy = SCHED_FIFO; 1663 break; 1664 case LINUX_SCHED_RR: 1665 bsd.policy = SCHED_RR; 1666 break; 1667 default: 1668 return (EINVAL); 1669 } 1670 return (sys_sched_get_priority_max(td, &bsd)); 1671 } 1672 1673 int 1674 linux_sched_get_priority_min(struct thread *td, 1675 struct linux_sched_get_priority_min_args *args) 1676 { 1677 struct sched_get_priority_min_args bsd; 1678 1679 if (linux_map_sched_prio) { 1680 switch (args->policy) { 1681 case LINUX_SCHED_OTHER: 1682 td->td_retval[0] = 0; 1683 return (0); 1684 case LINUX_SCHED_FIFO: 1685 case LINUX_SCHED_RR: 1686 td->td_retval[0] = 1; 1687 return (0); 1688 default: 1689 return (EINVAL); 1690 } 1691 } 1692 1693 switch (args->policy) { 1694 case LINUX_SCHED_OTHER: 1695 bsd.policy = SCHED_OTHER; 1696 break; 1697 case LINUX_SCHED_FIFO: 1698 bsd.policy = SCHED_FIFO; 1699 break; 1700 case LINUX_SCHED_RR: 1701 bsd.policy = SCHED_RR; 1702 break; 1703 default: 1704 return (EINVAL); 1705 } 1706 return (sys_sched_get_priority_min(td, &bsd)); 1707 } 1708 1709 #define REBOOT_CAD_ON 0x89abcdef 1710 #define REBOOT_CAD_OFF 0 1711 #define REBOOT_HALT 0xcdef0123 1712 #define REBOOT_RESTART 0x01234567 1713 #define REBOOT_RESTART2 0xA1B2C3D4 1714 #define REBOOT_POWEROFF 0x4321FEDC 1715 #define REBOOT_MAGIC1 0xfee1dead 1716 #define REBOOT_MAGIC2 0x28121969 1717 #define REBOOT_MAGIC2A 0x05121996 1718 #define REBOOT_MAGIC2B 0x16041998 1719 1720 int 1721 linux_reboot(struct thread *td, struct linux_reboot_args *args) 1722 { 1723 struct reboot_args bsd_args; 1724 1725 if (args->magic1 != REBOOT_MAGIC1) 1726 return (EINVAL); 1727 1728 switch (args->magic2) { 1729 case REBOOT_MAGIC2: 1730 case REBOOT_MAGIC2A: 1731 case REBOOT_MAGIC2B: 1732 break; 1733 default: 1734 return (EINVAL); 1735 } 1736 1737 switch (args->cmd) { 1738 case REBOOT_CAD_ON: 1739 case REBOOT_CAD_OFF: 1740 return (priv_check(td, PRIV_REBOOT)); 1741 case REBOOT_HALT: 1742 bsd_args.opt = RB_HALT; 1743 break; 1744 case REBOOT_RESTART: 1745 case REBOOT_RESTART2: 1746 bsd_args.opt = 0; 1747 break; 1748 case REBOOT_POWEROFF: 1749 bsd_args.opt = RB_POWEROFF; 1750 break; 1751 default: 1752 return (EINVAL); 1753 } 1754 return (sys_reboot(td, &bsd_args)); 1755 } 1756 1757 int 1758 linux_getpid(struct thread *td, struct linux_getpid_args *args) 1759 { 1760 1761 td->td_retval[0] = td->td_proc->p_pid; 1762 1763 return (0); 1764 } 1765 1766 int 1767 linux_gettid(struct thread *td, struct linux_gettid_args *args) 1768 { 1769 struct linux_emuldata *em; 1770 1771 em = em_find(td); 1772 KASSERT(em != NULL, ("gettid: emuldata not found.\n")); 1773 1774 td->td_retval[0] = em->em_tid; 1775 1776 return (0); 1777 } 1778 1779 int 1780 linux_getppid(struct thread *td, struct linux_getppid_args *args) 1781 { 1782 1783 td->td_retval[0] = kern_getppid(td); 1784 return (0); 1785 } 1786 1787 int 1788 linux_getgid(struct thread *td, struct linux_getgid_args *args) 1789 { 1790 1791 td->td_retval[0] = td->td_ucred->cr_rgid; 1792 return (0); 1793 } 1794 1795 int 1796 linux_getuid(struct thread *td, struct linux_getuid_args *args) 1797 { 1798 1799 td->td_retval[0] = td->td_ucred->cr_ruid; 1800 return (0); 1801 } 1802 1803 int 1804 linux_getsid(struct thread *td, struct linux_getsid_args *args) 1805 { 1806 1807 return (kern_getsid(td, args->pid)); 1808 } 1809 1810 int 1811 linux_nosys(struct thread *td, struct nosys_args *ignore) 1812 { 1813 1814 return (ENOSYS); 1815 } 1816 1817 int 1818 linux_getpriority(struct thread *td, struct linux_getpriority_args *args) 1819 { 1820 int error; 1821 1822 error = kern_getpriority(td, args->which, args->who); 1823 td->td_retval[0] = 20 - td->td_retval[0]; 1824 return (error); 1825 } 1826 1827 int 1828 linux_sethostname(struct thread *td, struct linux_sethostname_args *args) 1829 { 1830 int name[2]; 1831 1832 name[0] = CTL_KERN; 1833 name[1] = KERN_HOSTNAME; 1834 return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname, 1835 args->len, 0, 0)); 1836 } 1837 1838 int 1839 linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args) 1840 { 1841 int name[2]; 1842 1843 name[0] = CTL_KERN; 1844 name[1] = KERN_NISDOMAINNAME; 1845 return (userland_sysctl(td, name, 2, 0, 0, 0, args->name, 1846 args->len, 0, 0)); 1847 } 1848 1849 int 1850 linux_exit_group(struct thread *td, struct linux_exit_group_args *args) 1851 { 1852 1853 LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid, 1854 args->error_code); 1855 1856 /* 1857 * XXX: we should send a signal to the parent if 1858 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?) 1859 * as it doesnt occur often. 1860 */ 1861 exit1(td, args->error_code, 0); 1862 /* NOTREACHED */ 1863 } 1864 1865 #define _LINUX_CAPABILITY_VERSION_1 0x19980330 1866 #define _LINUX_CAPABILITY_VERSION_2 0x20071026 1867 #define _LINUX_CAPABILITY_VERSION_3 0x20080522 1868 1869 struct l_user_cap_header { 1870 l_int version; 1871 l_int pid; 1872 }; 1873 1874 struct l_user_cap_data { 1875 l_int effective; 1876 l_int permitted; 1877 l_int inheritable; 1878 }; 1879 1880 int 1881 linux_capget(struct thread *td, struct linux_capget_args *uap) 1882 { 1883 struct l_user_cap_header luch; 1884 struct l_user_cap_data lucd[2]; 1885 int error, u32s; 1886 1887 if (uap->hdrp == NULL) 1888 return (EFAULT); 1889 1890 error = copyin(uap->hdrp, &luch, sizeof(luch)); 1891 if (error != 0) 1892 return (error); 1893 1894 switch (luch.version) { 1895 case _LINUX_CAPABILITY_VERSION_1: 1896 u32s = 1; 1897 break; 1898 case _LINUX_CAPABILITY_VERSION_2: 1899 case _LINUX_CAPABILITY_VERSION_3: 1900 u32s = 2; 1901 break; 1902 default: 1903 luch.version = _LINUX_CAPABILITY_VERSION_1; 1904 error = copyout(&luch, uap->hdrp, sizeof(luch)); 1905 if (error) 1906 return (error); 1907 return (EINVAL); 1908 } 1909 1910 if (luch.pid) 1911 return (EPERM); 1912 1913 if (uap->datap) { 1914 /* 1915 * The current implementation doesn't support setting 1916 * a capability (it's essentially a stub) so indicate 1917 * that no capabilities are currently set or available 1918 * to request. 1919 */ 1920 memset(&lucd, 0, u32s * sizeof(lucd[0])); 1921 error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0])); 1922 } 1923 1924 return (error); 1925 } 1926 1927 int 1928 linux_capset(struct thread *td, struct linux_capset_args *uap) 1929 { 1930 struct l_user_cap_header luch; 1931 struct l_user_cap_data lucd[2]; 1932 int error, i, u32s; 1933 1934 if (uap->hdrp == NULL || uap->datap == NULL) 1935 return (EFAULT); 1936 1937 error = copyin(uap->hdrp, &luch, sizeof(luch)); 1938 if (error != 0) 1939 return (error); 1940 1941 switch (luch.version) { 1942 case _LINUX_CAPABILITY_VERSION_1: 1943 u32s = 1; 1944 break; 1945 case _LINUX_CAPABILITY_VERSION_2: 1946 case _LINUX_CAPABILITY_VERSION_3: 1947 u32s = 2; 1948 break; 1949 default: 1950 luch.version = _LINUX_CAPABILITY_VERSION_1; 1951 error = copyout(&luch, uap->hdrp, sizeof(luch)); 1952 if (error) 1953 return (error); 1954 return (EINVAL); 1955 } 1956 1957 if (luch.pid) 1958 return (EPERM); 1959 1960 error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0])); 1961 if (error != 0) 1962 return (error); 1963 1964 /* We currently don't support setting any capabilities. */ 1965 for (i = 0; i < u32s; i++) { 1966 if (lucd[i].effective || lucd[i].permitted || 1967 lucd[i].inheritable) { 1968 linux_msg(td, 1969 "capset[%d] effective=0x%x, permitted=0x%x, " 1970 "inheritable=0x%x is not implemented", i, 1971 (int)lucd[i].effective, (int)lucd[i].permitted, 1972 (int)lucd[i].inheritable); 1973 return (EPERM); 1974 } 1975 } 1976 1977 return (0); 1978 } 1979 1980 int 1981 linux_prctl(struct thread *td, struct linux_prctl_args *args) 1982 { 1983 int error = 0, max_size, arg; 1984 struct proc *p = td->td_proc; 1985 char comm[LINUX_MAX_COMM_LEN]; 1986 int pdeath_signal, trace_state; 1987 1988 switch (args->option) { 1989 case LINUX_PR_SET_PDEATHSIG: 1990 if (!LINUX_SIG_VALID(args->arg2)) 1991 return (EINVAL); 1992 pdeath_signal = linux_to_bsd_signal(args->arg2); 1993 return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL, 1994 &pdeath_signal)); 1995 case LINUX_PR_GET_PDEATHSIG: 1996 error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS, 1997 &pdeath_signal); 1998 if (error != 0) 1999 return (error); 2000 pdeath_signal = bsd_to_linux_signal(pdeath_signal); 2001 return (copyout(&pdeath_signal, 2002 (void *)(register_t)args->arg2, 2003 sizeof(pdeath_signal))); 2004 /* 2005 * In Linux, this flag controls if set[gu]id processes can coredump. 2006 * There are additional semantics imposed on processes that cannot 2007 * coredump: 2008 * - Such processes can not be ptraced. 2009 * - There are some semantics around ownership of process-related files 2010 * in the /proc namespace. 2011 * 2012 * In FreeBSD, we can (and by default, do) disable setuid coredump 2013 * system-wide with 'sugid_coredump.' We control tracability on a 2014 * per-process basis with the procctl PROC_TRACE (=> P2_NOTRACE flag). 2015 * By happy coincidence, P2_NOTRACE also prevents coredumping. So the 2016 * procctl is roughly analogous to Linux's DUMPABLE. 2017 * 2018 * So, proxy these knobs to the corresponding PROC_TRACE setting. 2019 */ 2020 case LINUX_PR_GET_DUMPABLE: 2021 error = kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_STATUS, 2022 &trace_state); 2023 if (error != 0) 2024 return (error); 2025 td->td_retval[0] = (trace_state != -1); 2026 return (0); 2027 case LINUX_PR_SET_DUMPABLE: 2028 /* 2029 * It is only valid for userspace to set one of these two 2030 * flags, and only one at a time. 2031 */ 2032 switch (args->arg2) { 2033 case LINUX_SUID_DUMP_DISABLE: 2034 trace_state = PROC_TRACE_CTL_DISABLE_EXEC; 2035 break; 2036 case LINUX_SUID_DUMP_USER: 2037 trace_state = PROC_TRACE_CTL_ENABLE; 2038 break; 2039 default: 2040 return (EINVAL); 2041 } 2042 return (kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_CTL, 2043 &trace_state)); 2044 case LINUX_PR_GET_KEEPCAPS: 2045 /* 2046 * Indicate that we always clear the effective and 2047 * permitted capability sets when the user id becomes 2048 * non-zero (actually the capability sets are simply 2049 * always zero in the current implementation). 2050 */ 2051 td->td_retval[0] = 0; 2052 break; 2053 case LINUX_PR_SET_KEEPCAPS: 2054 /* 2055 * Ignore requests to keep the effective and permitted 2056 * capability sets when the user id becomes non-zero. 2057 */ 2058 break; 2059 case LINUX_PR_SET_NAME: 2060 /* 2061 * To be on the safe side we need to make sure to not 2062 * overflow the size a Linux program expects. We already 2063 * do this here in the copyin, so that we don't need to 2064 * check on copyout. 2065 */ 2066 max_size = MIN(sizeof(comm), sizeof(p->p_comm)); 2067 error = copyinstr((void *)(register_t)args->arg2, comm, 2068 max_size, NULL); 2069 2070 /* Linux silently truncates the name if it is too long. */ 2071 if (error == ENAMETOOLONG) { 2072 /* 2073 * XXX: copyinstr() isn't documented to populate the 2074 * array completely, so do a copyin() to be on the 2075 * safe side. This should be changed in case 2076 * copyinstr() is changed to guarantee this. 2077 */ 2078 error = copyin((void *)(register_t)args->arg2, comm, 2079 max_size - 1); 2080 comm[max_size - 1] = '\0'; 2081 } 2082 if (error) 2083 return (error); 2084 2085 PROC_LOCK(p); 2086 strlcpy(p->p_comm, comm, sizeof(p->p_comm)); 2087 PROC_UNLOCK(p); 2088 break; 2089 case LINUX_PR_GET_NAME: 2090 PROC_LOCK(p); 2091 strlcpy(comm, p->p_comm, sizeof(comm)); 2092 PROC_UNLOCK(p); 2093 error = copyout(comm, (void *)(register_t)args->arg2, 2094 strlen(comm) + 1); 2095 break; 2096 case LINUX_PR_GET_SECCOMP: 2097 case LINUX_PR_SET_SECCOMP: 2098 /* 2099 * Same as returned by Linux without CONFIG_SECCOMP enabled. 2100 */ 2101 error = EINVAL; 2102 break; 2103 case LINUX_PR_CAPBSET_READ: 2104 #if 0 2105 /* 2106 * This makes too much noise with Ubuntu Focal. 2107 */ 2108 linux_msg(td, "unsupported prctl PR_CAPBSET_READ %d", 2109 (int)args->arg2); 2110 #endif 2111 error = EINVAL; 2112 break; 2113 case LINUX_PR_SET_NO_NEW_PRIVS: 2114 arg = args->arg2 == 1 ? 2115 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE; 2116 error = kern_procctl(td, P_PID, p->p_pid, 2117 PROC_NO_NEW_PRIVS_CTL, &arg); 2118 break; 2119 case LINUX_PR_SET_PTRACER: 2120 linux_msg(td, "unsupported prctl PR_SET_PTRACER"); 2121 error = EINVAL; 2122 break; 2123 default: 2124 linux_msg(td, "unsupported prctl option %d", args->option); 2125 error = EINVAL; 2126 break; 2127 } 2128 2129 return (error); 2130 } 2131 2132 int 2133 linux_sched_setparam(struct thread *td, 2134 struct linux_sched_setparam_args *uap) 2135 { 2136 struct sched_param sched_param; 2137 struct thread *tdt; 2138 int error, policy; 2139 2140 error = copyin(uap->param, &sched_param, sizeof(sched_param)); 2141 if (error) 2142 return (error); 2143 2144 tdt = linux_tdfind(td, uap->pid, -1); 2145 if (tdt == NULL) 2146 return (ESRCH); 2147 2148 if (linux_map_sched_prio) { 2149 error = kern_sched_getscheduler(td, tdt, &policy); 2150 if (error) 2151 goto out; 2152 2153 switch (policy) { 2154 case SCHED_OTHER: 2155 if (sched_param.sched_priority != 0) { 2156 error = EINVAL; 2157 goto out; 2158 } 2159 sched_param.sched_priority = 2160 PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE; 2161 break; 2162 case SCHED_FIFO: 2163 case SCHED_RR: 2164 if (sched_param.sched_priority < 1 || 2165 sched_param.sched_priority >= LINUX_MAX_RT_PRIO) { 2166 error = EINVAL; 2167 goto out; 2168 } 2169 /* 2170 * Map [1, LINUX_MAX_RT_PRIO - 1] to 2171 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down). 2172 */ 2173 sched_param.sched_priority = 2174 (sched_param.sched_priority - 1) * 2175 (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) / 2176 (LINUX_MAX_RT_PRIO - 1); 2177 break; 2178 } 2179 } 2180 2181 error = kern_sched_setparam(td, tdt, &sched_param); 2182 out: PROC_UNLOCK(tdt->td_proc); 2183 return (error); 2184 } 2185 2186 int 2187 linux_sched_getparam(struct thread *td, 2188 struct linux_sched_getparam_args *uap) 2189 { 2190 struct sched_param sched_param; 2191 struct thread *tdt; 2192 int error, policy; 2193 2194 tdt = linux_tdfind(td, uap->pid, -1); 2195 if (tdt == NULL) 2196 return (ESRCH); 2197 2198 error = kern_sched_getparam(td, tdt, &sched_param); 2199 if (error) { 2200 PROC_UNLOCK(tdt->td_proc); 2201 return (error); 2202 } 2203 2204 if (linux_map_sched_prio) { 2205 error = kern_sched_getscheduler(td, tdt, &policy); 2206 PROC_UNLOCK(tdt->td_proc); 2207 if (error) 2208 return (error); 2209 2210 switch (policy) { 2211 case SCHED_OTHER: 2212 sched_param.sched_priority = 0; 2213 break; 2214 case SCHED_FIFO: 2215 case SCHED_RR: 2216 /* 2217 * Map [0, RTP_PRIO_MAX - RTP_PRIO_MIN] to 2218 * [1, LINUX_MAX_RT_PRIO - 1] (rounding up). 2219 */ 2220 sched_param.sched_priority = 2221 (sched_param.sched_priority * 2222 (LINUX_MAX_RT_PRIO - 1) + 2223 (RTP_PRIO_MAX - RTP_PRIO_MIN - 1)) / 2224 (RTP_PRIO_MAX - RTP_PRIO_MIN) + 1; 2225 break; 2226 } 2227 } else 2228 PROC_UNLOCK(tdt->td_proc); 2229 2230 error = copyout(&sched_param, uap->param, sizeof(sched_param)); 2231 return (error); 2232 } 2233 2234 /* 2235 * Get affinity of a process. 2236 */ 2237 int 2238 linux_sched_getaffinity(struct thread *td, 2239 struct linux_sched_getaffinity_args *args) 2240 { 2241 int error; 2242 struct thread *tdt; 2243 2244 if (args->len < sizeof(cpuset_t)) 2245 return (EINVAL); 2246 2247 tdt = linux_tdfind(td, args->pid, -1); 2248 if (tdt == NULL) 2249 return (ESRCH); 2250 2251 PROC_UNLOCK(tdt->td_proc); 2252 2253 error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2254 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr); 2255 if (error == 0) 2256 td->td_retval[0] = sizeof(cpuset_t); 2257 2258 return (error); 2259 } 2260 2261 /* 2262 * Set affinity of a process. 2263 */ 2264 int 2265 linux_sched_setaffinity(struct thread *td, 2266 struct linux_sched_setaffinity_args *args) 2267 { 2268 struct thread *tdt; 2269 2270 if (args->len < sizeof(cpuset_t)) 2271 return (EINVAL); 2272 2273 tdt = linux_tdfind(td, args->pid, -1); 2274 if (tdt == NULL) 2275 return (ESRCH); 2276 2277 PROC_UNLOCK(tdt->td_proc); 2278 2279 return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2280 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr)); 2281 } 2282 2283 struct linux_rlimit64 { 2284 uint64_t rlim_cur; 2285 uint64_t rlim_max; 2286 }; 2287 2288 int 2289 linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args) 2290 { 2291 struct rlimit rlim, nrlim; 2292 struct linux_rlimit64 lrlim; 2293 struct proc *p; 2294 u_int which; 2295 int flags; 2296 int error; 2297 2298 if (args->new == NULL && args->old != NULL) { 2299 if (linux_get_dummy_limit(args->resource, &rlim)) { 2300 lrlim.rlim_cur = rlim.rlim_cur; 2301 lrlim.rlim_max = rlim.rlim_max; 2302 return (copyout(&lrlim, args->old, sizeof(lrlim))); 2303 } 2304 } 2305 2306 if (args->resource >= LINUX_RLIM_NLIMITS) 2307 return (EINVAL); 2308 2309 which = linux_to_bsd_resource[args->resource]; 2310 if (which == -1) 2311 return (EINVAL); 2312 2313 if (args->new != NULL) { 2314 /* 2315 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux 2316 * rlim is unsigned 64-bit. FreeBSD treats negative limits 2317 * as INFINITY so we do not need a conversion even. 2318 */ 2319 error = copyin(args->new, &nrlim, sizeof(nrlim)); 2320 if (error != 0) 2321 return (error); 2322 } 2323 2324 flags = PGET_HOLD | PGET_NOTWEXIT; 2325 if (args->new != NULL) 2326 flags |= PGET_CANDEBUG; 2327 else 2328 flags |= PGET_CANSEE; 2329 if (args->pid == 0) { 2330 p = td->td_proc; 2331 PHOLD(p); 2332 } else { 2333 error = pget(args->pid, flags, &p); 2334 if (error != 0) 2335 return (error); 2336 } 2337 if (args->old != NULL) { 2338 PROC_LOCK(p); 2339 lim_rlimit_proc(p, which, &rlim); 2340 PROC_UNLOCK(p); 2341 if (rlim.rlim_cur == RLIM_INFINITY) 2342 lrlim.rlim_cur = LINUX_RLIM_INFINITY; 2343 else 2344 lrlim.rlim_cur = rlim.rlim_cur; 2345 if (rlim.rlim_max == RLIM_INFINITY) 2346 lrlim.rlim_max = LINUX_RLIM_INFINITY; 2347 else 2348 lrlim.rlim_max = rlim.rlim_max; 2349 error = copyout(&lrlim, args->old, sizeof(lrlim)); 2350 if (error != 0) 2351 goto out; 2352 } 2353 2354 if (args->new != NULL) 2355 error = kern_proc_setrlimit(td, p, which, &nrlim); 2356 2357 out: 2358 PRELE(p); 2359 return (error); 2360 } 2361 2362 int 2363 linux_pselect6(struct thread *td, struct linux_pselect6_args *args) 2364 { 2365 struct l_timespec lts; 2366 struct timespec ts, *tsp; 2367 int error; 2368 2369 if (args->tsp != NULL) { 2370 error = copyin(args->tsp, <s, sizeof(lts)); 2371 if (error != 0) 2372 return (error); 2373 error = linux_to_native_timespec(&ts, <s); 2374 if (error != 0) 2375 return (error); 2376 tsp = &ts; 2377 } else 2378 tsp = NULL; 2379 2380 error = linux_common_pselect6(td, args->nfds, args->readfds, 2381 args->writefds, args->exceptfds, tsp, args->sig); 2382 if (error != 0) 2383 return (error); 2384 2385 if (args->tsp != NULL) { 2386 error = native_to_linux_timespec(<s, tsp); 2387 if (error == 0) 2388 error = copyout(<s, args->tsp, sizeof(lts)); 2389 } 2390 return (error); 2391 } 2392 2393 static int 2394 linux_common_pselect6(struct thread *td, l_int nfds, l_fd_set *readfds, 2395 l_fd_set *writefds, l_fd_set *exceptfds, struct timespec *tsp, 2396 l_uintptr_t *sig) 2397 { 2398 struct timeval utv, tv0, tv1, *tvp; 2399 struct l_pselect6arg lpse6; 2400 l_sigset_t l_ss; 2401 sigset_t *ssp; 2402 sigset_t ss; 2403 int error; 2404 2405 ssp = NULL; 2406 if (sig != NULL) { 2407 error = copyin(sig, &lpse6, sizeof(lpse6)); 2408 if (error != 0) 2409 return (error); 2410 if (lpse6.ss_len != sizeof(l_ss)) 2411 return (EINVAL); 2412 if (lpse6.ss != 0) { 2413 error = copyin(PTRIN(lpse6.ss), &l_ss, 2414 sizeof(l_ss)); 2415 if (error != 0) 2416 return (error); 2417 linux_to_bsd_sigset(&l_ss, &ss); 2418 ssp = &ss; 2419 } 2420 } else 2421 ssp = NULL; 2422 2423 /* 2424 * Currently glibc changes nanosecond number to microsecond. 2425 * This mean losing precision but for now it is hardly seen. 2426 */ 2427 if (tsp != NULL) { 2428 TIMESPEC_TO_TIMEVAL(&utv, tsp); 2429 if (itimerfix(&utv)) 2430 return (EINVAL); 2431 2432 microtime(&tv0); 2433 tvp = &utv; 2434 } else 2435 tvp = NULL; 2436 2437 error = kern_pselect(td, nfds, readfds, writefds, 2438 exceptfds, tvp, ssp, LINUX_NFDBITS); 2439 2440 if (error == 0 && tsp != NULL) { 2441 if (td->td_retval[0] != 0) { 2442 /* 2443 * Compute how much time was left of the timeout, 2444 * by subtracting the current time and the time 2445 * before we started the call, and subtracting 2446 * that result from the user-supplied value. 2447 */ 2448 2449 microtime(&tv1); 2450 timevalsub(&tv1, &tv0); 2451 timevalsub(&utv, &tv1); 2452 if (utv.tv_sec < 0) 2453 timevalclear(&utv); 2454 } else 2455 timevalclear(&utv); 2456 TIMEVAL_TO_TIMESPEC(&utv, tsp); 2457 } 2458 return (error); 2459 } 2460 2461 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 2462 int 2463 linux_pselect6_time64(struct thread *td, 2464 struct linux_pselect6_time64_args *args) 2465 { 2466 struct l_timespec64 lts; 2467 struct timespec ts, *tsp; 2468 int error; 2469 2470 if (args->tsp != NULL) { 2471 error = copyin(args->tsp, <s, sizeof(lts)); 2472 if (error != 0) 2473 return (error); 2474 error = linux_to_native_timespec64(&ts, <s); 2475 if (error != 0) 2476 return (error); 2477 tsp = &ts; 2478 } else 2479 tsp = NULL; 2480 2481 error = linux_common_pselect6(td, args->nfds, args->readfds, 2482 args->writefds, args->exceptfds, tsp, args->sig); 2483 if (error != 0) 2484 return (error); 2485 2486 if (args->tsp != NULL) { 2487 error = native_to_linux_timespec64(<s, tsp); 2488 if (error == 0) 2489 error = copyout(<s, args->tsp, sizeof(lts)); 2490 } 2491 return (error); 2492 } 2493 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 2494 2495 int 2496 linux_ppoll(struct thread *td, struct linux_ppoll_args *args) 2497 { 2498 struct timespec uts, *tsp; 2499 struct l_timespec lts; 2500 int error; 2501 2502 if (args->tsp != NULL) { 2503 error = copyin(args->tsp, <s, sizeof(lts)); 2504 if (error) 2505 return (error); 2506 error = linux_to_native_timespec(&uts, <s); 2507 if (error != 0) 2508 return (error); 2509 tsp = &uts; 2510 } else 2511 tsp = NULL; 2512 2513 error = linux_common_ppoll(td, args->fds, args->nfds, tsp, 2514 args->sset, args->ssize); 2515 if (error != 0) 2516 return (error); 2517 if (tsp != NULL) { 2518 error = native_to_linux_timespec(<s, tsp); 2519 if (error == 0) 2520 error = copyout(<s, args->tsp, sizeof(lts)); 2521 } 2522 return (error); 2523 } 2524 2525 static int 2526 linux_common_ppoll(struct thread *td, struct pollfd *fds, uint32_t nfds, 2527 struct timespec *tsp, l_sigset_t *sset, l_size_t ssize) 2528 { 2529 struct timespec ts0, ts1; 2530 struct pollfd stackfds[32]; 2531 struct pollfd *kfds; 2532 l_sigset_t l_ss; 2533 sigset_t *ssp; 2534 sigset_t ss; 2535 int error; 2536 2537 if (kern_poll_maxfds(nfds)) 2538 return (EINVAL); 2539 if (sset != NULL) { 2540 if (ssize != sizeof(l_ss)) 2541 return (EINVAL); 2542 error = copyin(sset, &l_ss, sizeof(l_ss)); 2543 if (error) 2544 return (error); 2545 linux_to_bsd_sigset(&l_ss, &ss); 2546 ssp = &ss; 2547 } else 2548 ssp = NULL; 2549 if (tsp != NULL) 2550 nanotime(&ts0); 2551 2552 if (nfds > nitems(stackfds)) 2553 kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK); 2554 else 2555 kfds = stackfds; 2556 error = linux_pollin(td, kfds, fds, nfds); 2557 if (error != 0) 2558 goto out; 2559 2560 error = kern_poll_kfds(td, kfds, nfds, tsp, ssp); 2561 if (error == 0) 2562 error = linux_pollout(td, kfds, fds, nfds); 2563 2564 if (error == 0 && tsp != NULL) { 2565 if (td->td_retval[0]) { 2566 nanotime(&ts1); 2567 timespecsub(&ts1, &ts0, &ts1); 2568 timespecsub(tsp, &ts1, tsp); 2569 if (tsp->tv_sec < 0) 2570 timespecclear(tsp); 2571 } else 2572 timespecclear(tsp); 2573 } 2574 2575 out: 2576 if (nfds > nitems(stackfds)) 2577 free(kfds, M_TEMP); 2578 return (error); 2579 } 2580 2581 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 2582 int 2583 linux_ppoll_time64(struct thread *td, struct linux_ppoll_time64_args *args) 2584 { 2585 struct timespec uts, *tsp; 2586 struct l_timespec64 lts; 2587 int error; 2588 2589 if (args->tsp != NULL) { 2590 error = copyin(args->tsp, <s, sizeof(lts)); 2591 if (error != 0) 2592 return (error); 2593 error = linux_to_native_timespec64(&uts, <s); 2594 if (error != 0) 2595 return (error); 2596 tsp = &uts; 2597 } else 2598 tsp = NULL; 2599 error = linux_common_ppoll(td, args->fds, args->nfds, tsp, 2600 args->sset, args->ssize); 2601 if (error != 0) 2602 return (error); 2603 if (tsp != NULL) { 2604 error = native_to_linux_timespec64(<s, tsp); 2605 if (error == 0) 2606 error = copyout(<s, args->tsp, sizeof(lts)); 2607 } 2608 return (error); 2609 } 2610 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 2611 2612 static int 2613 linux_pollin(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd) 2614 { 2615 int error; 2616 u_int i; 2617 2618 error = copyin(ufds, fds, nfd * sizeof(*fds)); 2619 if (error != 0) 2620 return (error); 2621 2622 for (i = 0; i < nfd; i++) { 2623 if (fds->events != 0) 2624 linux_to_bsd_poll_events(td, fds->fd, 2625 fds->events, &fds->events); 2626 fds++; 2627 } 2628 return (0); 2629 } 2630 2631 static int 2632 linux_pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd) 2633 { 2634 int error = 0; 2635 u_int i, n = 0; 2636 2637 for (i = 0; i < nfd; i++) { 2638 if (fds->revents != 0) { 2639 bsd_to_linux_poll_events(fds->revents, 2640 &fds->revents); 2641 n++; 2642 } 2643 error = copyout(&fds->revents, &ufds->revents, 2644 sizeof(ufds->revents)); 2645 if (error) 2646 return (error); 2647 fds++; 2648 ufds++; 2649 } 2650 td->td_retval[0] = n; 2651 return (0); 2652 } 2653 2654 int 2655 linux_sched_rr_get_interval(struct thread *td, 2656 struct linux_sched_rr_get_interval_args *uap) 2657 { 2658 struct timespec ts; 2659 struct l_timespec lts; 2660 struct thread *tdt; 2661 int error; 2662 2663 /* 2664 * According to man in case the invalid pid specified 2665 * EINVAL should be returned. 2666 */ 2667 if (uap->pid < 0) 2668 return (EINVAL); 2669 2670 tdt = linux_tdfind(td, uap->pid, -1); 2671 if (tdt == NULL) 2672 return (ESRCH); 2673 2674 error = kern_sched_rr_get_interval_td(td, tdt, &ts); 2675 PROC_UNLOCK(tdt->td_proc); 2676 if (error != 0) 2677 return (error); 2678 error = native_to_linux_timespec(<s, &ts); 2679 if (error != 0) 2680 return (error); 2681 return (copyout(<s, uap->interval, sizeof(lts))); 2682 } 2683 2684 /* 2685 * In case when the Linux thread is the initial thread in 2686 * the thread group thread id is equal to the process id. 2687 * Glibc depends on this magic (assert in pthread_getattr_np.c). 2688 */ 2689 struct thread * 2690 linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid) 2691 { 2692 struct linux_emuldata *em; 2693 struct thread *tdt; 2694 struct proc *p; 2695 2696 tdt = NULL; 2697 if (tid == 0 || tid == td->td_tid) { 2698 tdt = td; 2699 PROC_LOCK(tdt->td_proc); 2700 } else if (tid > PID_MAX) 2701 tdt = tdfind(tid, pid); 2702 else { 2703 /* 2704 * Initial thread where the tid equal to the pid. 2705 */ 2706 p = pfind(tid); 2707 if (p != NULL) { 2708 if (SV_PROC_ABI(p) != SV_ABI_LINUX) { 2709 /* 2710 * p is not a Linuxulator process. 2711 */ 2712 PROC_UNLOCK(p); 2713 return (NULL); 2714 } 2715 FOREACH_THREAD_IN_PROC(p, tdt) { 2716 em = em_find(tdt); 2717 if (tid == em->em_tid) 2718 return (tdt); 2719 } 2720 PROC_UNLOCK(p); 2721 } 2722 return (NULL); 2723 } 2724 2725 return (tdt); 2726 } 2727 2728 void 2729 linux_to_bsd_waitopts(int options, int *bsdopts) 2730 { 2731 2732 if (options & LINUX_WNOHANG) 2733 *bsdopts |= WNOHANG; 2734 if (options & LINUX_WUNTRACED) 2735 *bsdopts |= WUNTRACED; 2736 if (options & LINUX_WEXITED) 2737 *bsdopts |= WEXITED; 2738 if (options & LINUX_WCONTINUED) 2739 *bsdopts |= WCONTINUED; 2740 if (options & LINUX_WNOWAIT) 2741 *bsdopts |= WNOWAIT; 2742 2743 if (options & __WCLONE) 2744 *bsdopts |= WLINUXCLONE; 2745 } 2746 2747 int 2748 linux_getrandom(struct thread *td, struct linux_getrandom_args *args) 2749 { 2750 struct uio uio; 2751 struct iovec iov; 2752 int error; 2753 2754 if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM)) 2755 return (EINVAL); 2756 if (args->count > INT_MAX) 2757 args->count = INT_MAX; 2758 2759 iov.iov_base = args->buf; 2760 iov.iov_len = args->count; 2761 2762 uio.uio_iov = &iov; 2763 uio.uio_iovcnt = 1; 2764 uio.uio_resid = iov.iov_len; 2765 uio.uio_segflg = UIO_USERSPACE; 2766 uio.uio_rw = UIO_READ; 2767 uio.uio_td = td; 2768 2769 error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK); 2770 if (error == 0) 2771 td->td_retval[0] = args->count - uio.uio_resid; 2772 return (error); 2773 } 2774 2775 int 2776 linux_mincore(struct thread *td, struct linux_mincore_args *args) 2777 { 2778 2779 /* Needs to be page-aligned */ 2780 if (args->start & PAGE_MASK) 2781 return (EINVAL); 2782 return (kern_mincore(td, args->start, args->len, args->vec)); 2783 } 2784 2785 #define SYSLOG_TAG "<6>" 2786 2787 int 2788 linux_syslog(struct thread *td, struct linux_syslog_args *args) 2789 { 2790 char buf[128], *src, *dst; 2791 u_int seq; 2792 int buflen, error; 2793 2794 if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) { 2795 linux_msg(td, "syslog unsupported type 0x%x", args->type); 2796 return (EINVAL); 2797 } 2798 2799 if (args->len < 6) { 2800 td->td_retval[0] = 0; 2801 return (0); 2802 } 2803 2804 error = priv_check(td, PRIV_MSGBUF); 2805 if (error) 2806 return (error); 2807 2808 mtx_lock(&msgbuf_lock); 2809 msgbuf_peekbytes(msgbufp, NULL, 0, &seq); 2810 mtx_unlock(&msgbuf_lock); 2811 2812 dst = args->buf; 2813 error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG)); 2814 /* The -1 is to skip the trailing '\0'. */ 2815 dst += sizeof(SYSLOG_TAG) - 1; 2816 2817 while (error == 0) { 2818 mtx_lock(&msgbuf_lock); 2819 buflen = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq); 2820 mtx_unlock(&msgbuf_lock); 2821 2822 if (buflen == 0) 2823 break; 2824 2825 for (src = buf; src < buf + buflen && error == 0; src++) { 2826 if (*src == '\0') 2827 continue; 2828 2829 if (dst >= args->buf + args->len) 2830 goto out; 2831 2832 error = copyout(src, dst, 1); 2833 dst++; 2834 2835 if (*src == '\n' && *(src + 1) != '<' && 2836 dst + sizeof(SYSLOG_TAG) < args->buf + args->len) { 2837 error = copyout(&SYSLOG_TAG, 2838 dst, sizeof(SYSLOG_TAG)); 2839 dst += sizeof(SYSLOG_TAG) - 1; 2840 } 2841 } 2842 } 2843 out: 2844 td->td_retval[0] = dst - args->buf; 2845 return (error); 2846 } 2847 2848 int 2849 linux_getcpu(struct thread *td, struct linux_getcpu_args *args) 2850 { 2851 int cpu, error, node; 2852 2853 cpu = td->td_oncpu; /* Make sure it doesn't change during copyout(9) */ 2854 error = 0; 2855 node = cpuid_to_pcpu[cpu]->pc_domain; 2856 2857 if (args->cpu != NULL) 2858 error = copyout(&cpu, args->cpu, sizeof(l_int)); 2859 if (args->node != NULL) 2860 error = copyout(&node, args->node, sizeof(l_int)); 2861 return (error); 2862 } 2863 2864 #if defined(__i386__) || defined(__amd64__) 2865 int 2866 linux_poll(struct thread *td, struct linux_poll_args *args) 2867 { 2868 struct timespec ts, *tsp; 2869 2870 if (args->timeout != INFTIM) { 2871 if (args->timeout < 0) 2872 return (EINVAL); 2873 ts.tv_sec = args->timeout / 1000; 2874 ts.tv_nsec = (args->timeout % 1000) * 1000000; 2875 tsp = &ts; 2876 } else 2877 tsp = NULL; 2878 2879 return (linux_common_ppoll(td, args->fds, args->nfds, 2880 tsp, NULL, 0)); 2881 } 2882 #endif /* __i386__ || __amd64__ */ 2883