/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_common.h>
#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_timer.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalhigh;
	l_ulong		freehigh;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};

static int	linux_utimensat_lts_to_ts(struct l_timespec *,
			struct timespec *);
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
static int	linux_utimensat_lts64_to_ts(struct l_timespec64 *,
			struct timespec *);
#endif
static int	linux_common_utimensat(struct thread *, int,
			const char *, struct timespec *, int);
static int	linux_common_pselect6(struct thread *, l_int,
			l_fd_set *, l_fd_set *, l_fd_set *,
			struct timespec *, l_uintptr_t *);
static int	linux_common_ppoll(struct thread *, struct pollfd *,
			uint32_t, struct timespec *, l_sigset_t *,
			l_size_t);
static int	linux_pollin(struct thread *, struct pollfd *,
			struct pollfd *, u_int);
static int	linux_pollout(struct thread *, struct pollfd *,
			struct pollfd *, u_int);

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = (u_long)vm_free_count() * PAGE_SIZE;

	/*
	 * sharedram counts pages allocated to named, swap-backed objects such
	 * as shared memory segments and tmpfs files.  There is no cheap way to
	 * compute this, so just leave the field unpopulated.  Linux itself only
	 * started setting this field in the 3.x timeframe.
	 */
	sysinfo.sharedram = 0;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/*
	 * Platforms supported by the emulation layer do not have a notion of
	 * high memory.
	 */
	sysinfo.totalhigh = 0;
	sysinfo.freehigh = 0;

	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}
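
/*
 * Illustrative only: a Linux application typically consumes the structure
 * filled in above via sysinfo(2), e.g.
 *
 *	struct sysinfo si;
 *	if (sysinfo(&si) == 0)
 *		total = (uint64_t)si.totalram * si.mem_unit;
 *
 * Since mem_unit is set to 1 here, the ram/swap fields are byte counts,
 * and loads[] is fixed-point with LINUX_SYSINFO_LOADS_SCALE (65536)
 * representing a load average of 1.0.
 */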

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

	secs = args->secs;
	/*
	 * Linux alarm() is always successful.  Limit secs to INT32_MAX / 2
	 * to match kern_setitimer()'s limit and avoid an error from it.
	 *
	 * XXX. Linux limits secs to INT_MAX on 32-bit platforms and does not
	 * limit it at all on 64-bit platforms.
	 */
	if (secs > INT32_MAX / 2)
		secs = INT32_MAX / 2;

	it.it_value.tv_sec = secs;
	it.it_value.tv_usec = 0;
	timevalclear(&it.it_interval);
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	KASSERT(error == 0, ("kern_setitimer returns %d", error));

	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
	    old_it.it_value.tv_usec >= 500000)
		old_it.it_value.tv_sec++;
	td->td_retval[0] = old_it.it_value.tv_sec;
	return (0);
}
#endif
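
/*
 * Linux brk(2) returns the new break address on success and the old,
 * unchanged break on failure, rather than 0/-1 as on FreeBSD; callers
 * detect failure by comparing the returned address with the one they
 * requested, which is why this wrapper never returns an error itself.
 */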
int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	uintptr_t new, old;

	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (uintptr_t)args->dsend;
	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
		td->td_retval[0] = (register_t)new;
	else
		td->td_retval[0] = (register_t)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	if (!LUSECONVPATH(td)) {
		NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_USERSPACE, args->library, td);
		error = namei(&ni);
	} else {
		LCONVPATHEXIST(td, args->library, &library);
		NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_SYSSPACE, library, td);
		error = namei(&ni);
		LFREEPATH(library);
	}
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}
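
	/*
	 * Background for the offsets above: a ZMAGIC a.out keeps its header
	 * in a separate 1024-byte block on disk, so the text image starts at
	 * file offset 1024, while a QMAGIC image maps the header together
	 * with the text and is therefore mapped from file offset 0.
	 */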

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed
	 */
	locked = false;
	VOP_UNLOCK(vp);

	/*
	 * Check if file_offset is page aligned.  Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		textset = false;
	}

	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	if (opened) {
		if (locked)
			VOP_UNLOCK(vp);
		locked = false;
		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
	}
	if (textset) {
		if (!locked) {
			locked = true;
			VOP_LOCK(vp, LK_SHARED | LK_RETRY);
		}
		VOP_UNSET_TEXT_CHECKED(vp);
	}
	if (locked)
		VOP_UNLOCK(vp);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
	return (error);
}
#endif
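
/*
 * Linux mremap(2), reduced to the subset this layer supports: a mapping
 * can only be shrunk in place (by unmapping its tail); any request to
 * grow it fails with ENOMEM, and the MAYMOVE/FIXED flags are accepted
 * but never result in a relocation.
 */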
int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	uintptr_t addr;
	size_t len;
	int error = 0;

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		addr = args->addr + args->new_len;
		len = args->old_len - args->new_len;
		error = kern_munmap(td, addr, len);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{

	return (kern_msync(td, args->addr, args->len,
	    args->fl & ~LINUX_MS_SYNC));
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}
#endif

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use the hard-coded CLK_TCK value.
 * Since 2.2.1, glibc uses the value exported by the kernel via the
 * AT_CLKTCK auxiliary vector entry.
 */
#define CLK_TCK 100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))
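
/*
 * Illustrative example of the conversion: with stclohz = 128 (a typical
 * stathz) a runtime of r = { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2 * 128 + 500000 / (1000000 / 128) = 256 + 64 = 320 ticks, while the
 * legacy CONVOTCK() with CLK_TCK = 100 would report 200 + 50 = 250.
 */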

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
#if defined(__amd64__)
	/*
	 * On amd64, Linux uname(2) needs to return "x86_64"
	 * for both 64-bit and 32-bit applications.  On 32-bit,
	 * the string returned by getauxval(AT_PLATFORM) needs
	 * to remain "i686", though.
	 */
	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
#elif defined(__aarch64__)
	strlcpy(utsname.machine, "aarch64", LINUX_MAX_UTSNAME);
#elif defined(__i386__)
	strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
#endif

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut)) != 0)
			return (error);
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	if (!LUSECONVPATH(td)) {
		error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		LCONVPATHEXIST(td, args->fname, &fname);
		error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
		    UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv)) != 0)
			return (error);
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	if (!LUSECONVPATH(td)) {
		error = kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		LCONVPATHEXIST(td, args->fname, &fname);
		error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
		    tvp, UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif

static int
linux_utimensat_lts_to_ts(struct l_timespec *l_times, struct timespec *times)
{

	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
	    l_times->tv_nsec != LINUX_UTIME_NOW &&
	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
		return (EINVAL);

	times->tv_sec = l_times->tv_sec;
	switch (l_times->tv_nsec)
	{
	case LINUX_UTIME_OMIT:
		times->tv_nsec = UTIME_OMIT;
		break;
	case LINUX_UTIME_NOW:
		times->tv_nsec = UTIME_NOW;
		break;
	default:
		times->tv_nsec = l_times->tv_nsec;
	}

	return (0);
}

static int
linux_common_utimensat(struct thread *td, int ldfd, const char *pathname,
    struct timespec *timesp, int lflags)
{
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (ldfd == LINUX_AT_FDCWD) ? AT_FDCWD : ldfd;

	if (lflags & ~(LINUX_AT_SYMLINK_NOFOLLOW | LINUX_AT_EMPTY_PATH))
		return (EINVAL);

	if (timesp != NULL) {
		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
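		 * (The effect is visible below: when both timestamps are
		 * UTIME_OMIT we return success before the path is even
		 * looked up, so errors such as a nonexistent file are not
		 * reported.)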
		 */
		if (timesp[0].tv_nsec == UTIME_OMIT &&
		    timesp[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (lflags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;
	if (lflags & LINUX_AT_EMPTY_PATH)
		flags |= AT_EMPTY_PATH;

	if (!LUSECONVPATH(td)) {
		if (pathname != NULL) {
			return (kern_utimensat(td, dfd, pathname,
			    UIO_USERSPACE, timesp, UIO_SYSSPACE, flags));
		}
	}

	if (pathname != NULL)
		LCONVPATHEXIST_AT(td, pathname, &path, dfd);
	else if (lflags != 0)
		return (EINVAL);

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
		    UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}

int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp;
	int error;

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		error = linux_utimensat_lts_to_ts(&l_times[0], &times[0]);
		if (error != 0)
			return (error);
		error = linux_utimensat_lts_to_ts(&l_times[1], &times[1]);
		if (error != 0)
			return (error);
		timesp = times;
	} else
		timesp = NULL;

	return (linux_common_utimensat(td, args->dfd, args->pathname,
	    timesp, args->flags));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
static int
linux_utimensat_lts64_to_ts(struct l_timespec64 *l_times, struct timespec *times)
{

	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
	    l_times->tv_nsec != LINUX_UTIME_NOW &&
	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
		return (EINVAL);

	times->tv_sec = l_times->tv_sec;
	switch (l_times->tv_nsec)
	{
	case LINUX_UTIME_OMIT:
		times->tv_nsec = UTIME_OMIT;
		break;
	case LINUX_UTIME_NOW:
		times->tv_nsec = UTIME_NOW;
		break;
	default:
		times->tv_nsec = l_times->tv_nsec;
	}

	return (0);
}

int
linux_utimensat_time64(struct thread *td, struct linux_utimensat_time64_args *args)
{
	struct l_timespec64 l_times[2];
	struct timespec times[2], *timesp;
	int error;

	if (args->times64 != NULL) {
		error = copyin(args->times64, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		error = linux_utimensat_lts64_to_ts(&l_times[0], &times[0]);
		if (error != 0)
			return (error);
		error = linux_utimensat_lts64_to_ts(&l_times[1], &times[1]);
		if (error != 0)
			return (error);
		timesp = times;
	} else
		timesp = NULL;

	return (linux_common_utimensat(td, args->dfd, args->pathname,
	    timesp, args->flags));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv)) != 0)
			return (error);
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	if (!LUSECONVPATH(td)) {
		error = kern_utimesat(td, dfd, args->filename, UIO_USERSPACE,
		    tvp, UIO_SYSSPACE);
	} else {
		LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);
		error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE,
		    tvp, UIO_SYSSPACE);
		LFREEPATH(fname);
	}
	return (error);
}
#endif
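
/*
 * The BSD and Linux wait status words share the same layout: the low
 * 7 bits hold the terminating signal, bits 8..15 hold the exit code
 * or the stopping signal, and 0xffff denotes a continued child.  Only
 * the signal numbers differ, so the helper below rewrites just those
 * bit fields via bsd_to_linux_signal().
 */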
static int
linux_common_wait(struct thread *td, int pid, int *statusp,
    int options, struct __wrusage *wrup)
{
	siginfo_t siginfo;
	idtype_t idtype;
	id_t id;
	int error, status, tmpstat;

	if (pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = (id_t)-pid;
	} else {
		idtype = P_PID;
		id = (id_t)pid;
	}

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;
	error = kern_wait6(td, idtype, id, &status, options, wrup, &siginfo);
	if (error)
		return (error);

	if (statusp) {
		tmpstat = status & 0xffff;
		if (WIFSIGNALED(tmpstat)) {
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		} else if (WIFSTOPPED(tmpstat)) {
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
#if defined(__amd64__) && !defined(COMPAT_LINUX32)
			if (WSTOPSIG(status) == SIGTRAP) {
				tmpstat = linux_ptrace_status(td,
				    siginfo.si_pid, tmpstat);
			}
#endif
		} else if (WIFCONTINUED(tmpstat)) {
			tmpstat = 0xffff;
		}
		error = copyout(&tmpstat, statusp, sizeof(int));
	}

	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	struct linux_wait4_args wait4_args;

	wait4_args.pid = args->pid;
	wait4_args.status = args->status;
	wait4_args.options = args->options;
	wait4_args.rusage = NULL;

	return (linux_wait4(td, &wait4_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options;
	struct __wrusage wru, *wrup;

	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	options = WEXITED;
	linux_to_bsd_waitopts(args->options, &options);

	if (args->rusage != NULL)
		wrup = &wru;
	else
		wrup = NULL;
	error = linux_common_wait(td, args->pid, args->status, options, wrup);
	if (error != 0)
		return (error);
	if (args->rusage != NULL)
		error = linux_copyout_rusage(&wru.wru_self, args->rusage);
	return (error);
}

int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	int status, options, sig;
	struct __wrusage wru;
	siginfo_t siginfo;
	l_siginfo_t lsi;
	idtype_t idtype;
	struct proc *p;
	int error;

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED |
	    WCONTINUED))
		return (EINVAL);
	if (!(options & (WEXITED | WUNTRACED | WCONTINUED)))
		return (EINVAL);

	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	default:
		return (EINVAL);
	}

	error = kern_wait6(td, idtype, args->id, &status, options,
	    &wru, &siginfo);
	if (error != 0)
		return (error);
	if (args->rusage != NULL) {
		error = linux_copyout_rusage(&wru.wru_children,
		    args->rusage);
		if (error != 0)
			return (error);
	}
	if (args->info != NULL) {
		p = td->td_proc;
		bzero(&lsi, sizeof(lsi));
		if (td->td_retval[0] != 0) {
			sig = bsd_to_linux_signal(siginfo.si_signo);
			siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		}
		error = copyout(&lsi, args->info, sizeof(lsi));
	}
	td->td_retval[0] = 0;

	return (error);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;
	enum uio_seg seg;
	bool convpath;

	convpath = LUSECONVPATH(td);
	if (!convpath) {
		path = args->path;
		seg = UIO_USERSPACE;
	} else {
		LCONVPATHCREAT(td, args->path, &path);
		seg = UIO_SYSSPACE;
	}

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, path, seg,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, seg,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, path, seg,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (convpath)
		LFREEPATH(path);
	return (error);
}
#endif

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;
	enum uio_seg seg;
	bool convpath;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	convpath = LUSECONVPATH(td);
	if (!convpath) {
		path = __DECONST(char *, args->filename);
		seg = UIO_USERSPACE;
	} else {
		LCONVPATHCREAT_AT(td, args->filename, &path, dfd);
		seg = UIO_SYSSPACE;
	}

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, seg, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, seg, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, seg,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (convpath)
		LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
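 * (personality(0xffffffff) is the documented Linux idiom for querying
 * the current persona without changing it, which is why that value is
 * special-cased below.)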
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
	struct linux_pemuldata *pem;
	struct proc *p = td->td_proc;
	uint32_t old;

	PROC_LOCK(p);
	pem = pem_find(p);
	old = pem->persona;
	if (args->per != 0xffffffff)
		pem->persona = args->per;
	PROC_UNLOCK(p);

	td->td_retval[0] = old;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{

	return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	crextend(newcred, ngrp + 1);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	/*
	 * cr_groups[0] holds egid.  Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	proc_set_cred(p, newcred);
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_LINUX);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid.  Returning the whole set
	 * here will cause a duplicate.  Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_LINUX, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_LINUX);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

static bool
linux_get_dummy_limit(l_uint resource, struct rlimit *rlim)
{

	if (linux_dummy_rlimits == 0)
		return (false);

	switch (resource) {
	case LINUX_RLIMIT_LOCKS:
	case LINUX_RLIMIT_SIGPENDING:
	case LINUX_RLIMIT_MSGQUEUE:
	case LINUX_RLIMIT_RTTIME:
		rlim->rlim_cur = LINUX_RLIM_INFINITY;
		rlim->rlim_max = LINUX_RLIM_INFINITY;
		return (true);
	case LINUX_RLIMIT_NICE:
	case LINUX_RLIMIT_RTPRIO:
		rlim->rlim_cur = 0;
		rlim->rlim_max = 0;
		return (true);
	default:
		return (false);
	}
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);
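
	/*
	 * The legacy interface truncates rlim_t to a 32-bit (or, without
	 * COMPAT_LINUX32, native long) value; an infinite limit would
	 * truncate to all-ones, i.e. -1, so clamp it to the largest
	 * positive value instead, which appears to match what Linux's
	 * own old_getrlimit() hands to old binaries.
	 */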
#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	if (linux_map_sched_prio) {
		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0)
				return (EINVAL);

			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO)
				return (EINVAL);

			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
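			 * For example, with the usual values RTP_PRIO_MIN = 0,
			 * RTP_PRIO_MAX = 31 and LINUX_MAX_RT_PRIO = 100, Linux
			 * priority 1 maps to 0 and Linux priority 99 maps to
			 * 98 * 32 / 99 = 31.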
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = LINUX_MAX_RT_PRIO - 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}
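
/*
 * These mirror the LINUX_REBOOT_* command and magic constants from
 * Linux's <linux/reboot.h>; the dated MAGIC2 variants are alternative
 * magic2 values that Linux accepts interchangeably.
 */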
#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

	td->td_retval[0] = kern_getppid(td);
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{

	return (kern_getsid(td, args->pid));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	int error;

	error = kern_getpriority(td, args->which, args->who);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set.  We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
	/* NOTREACHED */
}

#define _LINUX_CAPABILITY_VERSION_1	0x19980330
#define _LINUX_CAPABILITY_VERSION_2	0x20071026
#define _LINUX_CAPABILITY_VERSION_3	0x20080522

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size, arg;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	int pdeath_signal, trace_state;

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		pdeath_signal = linux_to_bsd_signal(args->arg2);
		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
		    &pdeath_signal));
	case LINUX_PR_GET_PDEATHSIG:
		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
		    &pdeath_signal);
		if (error != 0)
			return (error);
		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
		return (copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal)));
	/*
	 * In Linux, this flag controls if set[gu]id processes can coredump.
	 * There are additional semantics imposed on processes that cannot
	 * coredump:
	 * - Such processes cannot be ptraced.
	 * - There are some semantics around ownership of process-related files
	 *   in the /proc namespace.
	 *
	 * In FreeBSD, we can (and by default, do) disable setuid coredump
	 * system-wide with 'sugid_coredump.'  We control traceability on a
	 * per-process basis with the procctl PROC_TRACE (=> P2_NOTRACE flag).
	 * By happy coincidence, P2_NOTRACE also prevents coredumping.  So the
	 * procctl is roughly analogous to Linux's DUMPABLE.
	 *
	 * So, proxy these knobs to the corresponding PROC_TRACE setting.
	 */
	case LINUX_PR_GET_DUMPABLE:
		error = kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_STATUS,
		    &trace_state);
		if (error != 0)
			return (error);
		td->td_retval[0] = (trace_state != -1);
		return (0);
	case LINUX_PR_SET_DUMPABLE:
		/*
		 * It is only valid for userspace to set one of these two
		 * flags, and only one at a time.
		 */
		switch (args->arg2) {
		case LINUX_SUID_DUMP_DISABLE:
			trace_state = PROC_TRACE_CTL_DISABLE_EXEC;
			break;
		case LINUX_SUID_DUMP_USER:
			trace_state = PROC_TRACE_CTL_ENABLE;
			break;
		default:
			return (EINVAL);
		}
		return (kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_CTL,
		    &trace_state));
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a Linux program expects.  We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
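		 * (LINUX_MAX_COMM_LEN matches Linux's TASK_COMM_LEN of 16
		 * bytes, so names are silently truncated to 15 characters
		 * plus the terminating NUL, just as on Linux.)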
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side.  This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	case LINUX_PR_GET_SECCOMP:
	case LINUX_PR_SET_SECCOMP:
		/*
		 * Same as returned by Linux without CONFIG_SECCOMP enabled.
		 */
		error = EINVAL;
		break;
	case LINUX_PR_CAPBSET_READ:
#if 0
		/*
		 * This makes too much noise with Ubuntu Focal.
		 */
		linux_msg(td, "unsupported prctl PR_CAPBSET_READ %d",
		    (int)args->arg2);
#endif
		error = EINVAL;
		break;
	case LINUX_PR_SET_NO_NEW_PRIVS:
		arg = args->arg2 == 1 ?
		    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
		error = kern_procctl(td, P_PID, p->p_pid,
		    PROC_NO_NEW_PRIVS_CTL, &arg);
		break;
	case LINUX_PR_SET_PTRACER:
		linux_msg(td, "unsupported prctl PR_SET_PTRACER");
		error = EINVAL;
		break;
	default:
		linux_msg(td, "unsupported prctl option %d", args->option);
		error = EINVAL;
		break;
	}

	return (error);
}

int
linux_sched_setparam(struct thread *td,
    struct linux_sched_setparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	error = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	if (linux_map_sched_prio) {
		error = kern_sched_getscheduler(td, tdt, &policy);
		if (error)
			goto out;

		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0) {
				error = EINVAL;
				goto out;
			}
			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO) {
				error = EINVAL;
				goto out;
			}
			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
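			 * (This is the same down-rounding map used in
			 * linux_sched_setscheduler() above; linux_sched_getparam()
			 * below applies the corresponding upward-rounding map
			 * in the other direction.)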
2174 */ 2175 sched_param.sched_priority = 2176 (sched_param.sched_priority - 1) * 2177 (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) / 2178 (LINUX_MAX_RT_PRIO - 1); 2179 break; 2180 } 2181 } 2182 2183 error = kern_sched_setparam(td, tdt, &sched_param); 2184 out: PROC_UNLOCK(tdt->td_proc); 2185 return (error); 2186 } 2187 2188 int 2189 linux_sched_getparam(struct thread *td, 2190 struct linux_sched_getparam_args *uap) 2191 { 2192 struct sched_param sched_param; 2193 struct thread *tdt; 2194 int error, policy; 2195 2196 tdt = linux_tdfind(td, uap->pid, -1); 2197 if (tdt == NULL) 2198 return (ESRCH); 2199 2200 error = kern_sched_getparam(td, tdt, &sched_param); 2201 if (error) { 2202 PROC_UNLOCK(tdt->td_proc); 2203 return (error); 2204 } 2205 2206 if (linux_map_sched_prio) { 2207 error = kern_sched_getscheduler(td, tdt, &policy); 2208 PROC_UNLOCK(tdt->td_proc); 2209 if (error) 2210 return (error); 2211 2212 switch (policy) { 2213 case SCHED_OTHER: 2214 sched_param.sched_priority = 0; 2215 break; 2216 case SCHED_FIFO: 2217 case SCHED_RR: 2218 /* 2219 * Map [0, RTP_PRIO_MAX - RTP_PRIO_MIN] to 2220 * [1, LINUX_MAX_RT_PRIO - 1] (rounding up). 2221 */ 2222 sched_param.sched_priority = 2223 (sched_param.sched_priority * 2224 (LINUX_MAX_RT_PRIO - 1) + 2225 (RTP_PRIO_MAX - RTP_PRIO_MIN - 1)) / 2226 (RTP_PRIO_MAX - RTP_PRIO_MIN) + 1; 2227 break; 2228 } 2229 } else 2230 PROC_UNLOCK(tdt->td_proc); 2231 2232 error = copyout(&sched_param, uap->param, sizeof(sched_param)); 2233 return (error); 2234 } 2235 2236 /* 2237 * Get affinity of a process. 2238 */ 2239 int 2240 linux_sched_getaffinity(struct thread *td, 2241 struct linux_sched_getaffinity_args *args) 2242 { 2243 int error; 2244 struct thread *tdt; 2245 2246 if (args->len < sizeof(cpuset_t)) 2247 return (EINVAL); 2248 2249 tdt = linux_tdfind(td, args->pid, -1); 2250 if (tdt == NULL) 2251 return (ESRCH); 2252 2253 PROC_UNLOCK(tdt->td_proc); 2254 2255 error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2256 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr); 2257 if (error == 0) 2258 td->td_retval[0] = sizeof(cpuset_t); 2259 2260 return (error); 2261 } 2262 2263 /* 2264 * Set affinity of a process. 2265 */ 2266 int 2267 linux_sched_setaffinity(struct thread *td, 2268 struct linux_sched_setaffinity_args *args) 2269 { 2270 struct thread *tdt; 2271 2272 if (args->len < sizeof(cpuset_t)) 2273 return (EINVAL); 2274 2275 tdt = linux_tdfind(td, args->pid, -1); 2276 if (tdt == NULL) 2277 return (ESRCH); 2278 2279 PROC_UNLOCK(tdt->td_proc); 2280 2281 return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2282 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr)); 2283 } 2284 2285 struct linux_rlimit64 { 2286 uint64_t rlim_cur; 2287 uint64_t rlim_max; 2288 }; 2289 2290 int 2291 linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args) 2292 { 2293 struct rlimit rlim, nrlim; 2294 struct linux_rlimit64 lrlim; 2295 struct proc *p; 2296 u_int which; 2297 int flags; 2298 int error; 2299 2300 if (args->new == NULL && args->old != NULL) { 2301 if (linux_get_dummy_limit(args->resource, &rlim)) { 2302 lrlim.rlim_cur = rlim.rlim_cur; 2303 lrlim.rlim_max = rlim.rlim_max; 2304 return (copyout(&lrlim, args->old, sizeof(lrlim))); 2305 } 2306 } 2307 2308 if (args->resource >= LINUX_RLIM_NLIMITS) 2309 return (EINVAL); 2310 2311 which = linux_to_bsd_resource[args->resource]; 2312 if (which == -1) 2313 return (EINVAL); 2314 2315 if (args->new != NULL) { 2316 /* 2317 * Note. 
	if (args->new != NULL) {
		/*
		 * Note: unlike FreeBSD, where rlim is a signed 64-bit value,
		 * the Linux rlim is unsigned 64-bit.  FreeBSD treats negative
		 * limits as INFINITY, so no conversion is needed here.
		 */
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	if (args->pid == 0) {
		p = td->td_proc;
		PHOLD(p);
	} else {
		error = pget(args->pid, flags, &p);
		if (error != 0)
			return (error);
	}
	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit_proc(p, which, &rlim);
		PROC_UNLOCK(p);
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

out:
	PRELE(p);
	return (error);
}

int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct l_timespec lts;
	struct timespec ts, *tsp;
	int error;

	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec(&ts, &lts);
		if (error != 0)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	error = linux_common_pselect6(td, args->nfds, args->readfds,
	    args->writefds, args->exceptfds, tsp, args->sig);
	if (error != 0)
		return (error);

	if (args->tsp != NULL) {
		error = native_to_linux_timespec(&lts, tsp);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}
	return (error);
}

static int
linux_common_pselect6(struct thread *td, l_int nfds, l_fd_set *readfds,
    l_fd_set *writefds, l_fd_set *exceptfds, struct timespec *tsp,
    l_uintptr_t *sig)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
	if (sig != NULL) {
		error = copyin(sig, &lpse6, sizeof(lpse6));
		if (error != 0)
			return (error);
		if (lpse6.ss_len != sizeof(l_ss))
			return (EINVAL);
		if (lpse6.ss != 0) {
			error = copyin(PTRIN(lpse6.ss), &l_ss,
			    sizeof(l_ss));
			if (error != 0)
				return (error);
			linux_to_bsd_sigset(&l_ss, &ss);
			ssp = &ss;
		}
	}

	/*
	 * Currently glibc converts the nanosecond value to microseconds.
	 * This means losing precision, but in practice the loss is hardly
	 * noticeable.
	 */
	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&utv, tsp);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, nfds, readfds, writefds,
	    exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (error == 0 && tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
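			/*
			 * Worked example: with a user-supplied timeout of
			 * 500 ms and a descriptor becoming ready after
			 * roughly 200 ms, tv1 - tv0 is ~200 ms, so ~300 ms
			 * is written back, matching the raw Linux syscall's
			 * habit of reporting the time left.
			 */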
2449 */ 2450 2451 microtime(&tv1); 2452 timevalsub(&tv1, &tv0); 2453 timevalsub(&utv, &tv1); 2454 if (utv.tv_sec < 0) 2455 timevalclear(&utv); 2456 } else 2457 timevalclear(&utv); 2458 TIMEVAL_TO_TIMESPEC(&utv, tsp); 2459 } 2460 return (error); 2461 } 2462 2463 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 2464 int 2465 linux_pselect6_time64(struct thread *td, 2466 struct linux_pselect6_time64_args *args) 2467 { 2468 struct l_timespec64 lts; 2469 struct timespec ts, *tsp; 2470 int error; 2471 2472 if (args->tsp != NULL) { 2473 error = copyin(args->tsp, <s, sizeof(lts)); 2474 if (error != 0) 2475 return (error); 2476 error = linux_to_native_timespec64(&ts, <s); 2477 if (error != 0) 2478 return (error); 2479 tsp = &ts; 2480 } else 2481 tsp = NULL; 2482 2483 error = linux_common_pselect6(td, args->nfds, args->readfds, 2484 args->writefds, args->exceptfds, tsp, args->sig); 2485 if (error != 0) 2486 return (error); 2487 2488 if (args->tsp != NULL) { 2489 error = native_to_linux_timespec64(<s, tsp); 2490 if (error == 0) 2491 error = copyout(<s, args->tsp, sizeof(lts)); 2492 } 2493 return (error); 2494 } 2495 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 2496 2497 int 2498 linux_ppoll(struct thread *td, struct linux_ppoll_args *args) 2499 { 2500 struct timespec uts, *tsp; 2501 struct l_timespec lts; 2502 int error; 2503 2504 if (args->tsp != NULL) { 2505 error = copyin(args->tsp, <s, sizeof(lts)); 2506 if (error) 2507 return (error); 2508 error = linux_to_native_timespec(&uts, <s); 2509 if (error != 0) 2510 return (error); 2511 tsp = &uts; 2512 } else 2513 tsp = NULL; 2514 2515 error = linux_common_ppoll(td, args->fds, args->nfds, tsp, 2516 args->sset, args->ssize); 2517 if (error != 0) 2518 return (error); 2519 if (tsp != NULL) { 2520 error = native_to_linux_timespec(<s, tsp); 2521 if (error == 0) 2522 error = copyout(<s, args->tsp, sizeof(lts)); 2523 } 2524 return (error); 2525 } 2526 2527 static int 2528 linux_common_ppoll(struct thread *td, struct pollfd *fds, uint32_t nfds, 2529 struct timespec *tsp, l_sigset_t *sset, l_size_t ssize) 2530 { 2531 struct timespec ts0, ts1; 2532 struct pollfd stackfds[32]; 2533 struct pollfd *kfds; 2534 l_sigset_t l_ss; 2535 sigset_t *ssp; 2536 sigset_t ss; 2537 int error; 2538 2539 if (kern_poll_maxfds(nfds)) 2540 return (EINVAL); 2541 if (sset != NULL) { 2542 if (ssize != sizeof(l_ss)) 2543 return (EINVAL); 2544 error = copyin(sset, &l_ss, sizeof(l_ss)); 2545 if (error) 2546 return (error); 2547 linux_to_bsd_sigset(&l_ss, &ss); 2548 ssp = &ss; 2549 } else 2550 ssp = NULL; 2551 if (tsp != NULL) 2552 nanotime(&ts0); 2553 2554 if (nfds > nitems(stackfds)) 2555 kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK); 2556 else 2557 kfds = stackfds; 2558 error = linux_pollin(td, kfds, fds, nfds); 2559 if (error != 0) 2560 goto out; 2561 2562 error = kern_poll_kfds(td, kfds, nfds, tsp, ssp); 2563 if (error == 0) 2564 error = linux_pollout(td, kfds, fds, nfds); 2565 2566 if (error == 0 && tsp != NULL) { 2567 if (td->td_retval[0]) { 2568 nanotime(&ts1); 2569 timespecsub(&ts1, &ts0, &ts1); 2570 timespecsub(tsp, &ts1, tsp); 2571 if (tsp->tv_sec < 0) 2572 timespecclear(tsp); 2573 } else 2574 timespecclear(tsp); 2575 } 2576 2577 out: 2578 if (nfds > nitems(stackfds)) 2579 free(kfds, M_TEMP); 2580 return (error); 2581 } 2582 2583 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 2584 int 2585 linux_ppoll_time64(struct thread *td, struct linux_ppoll_time64_args *args) 2586 { 2587 struct timespec uts, *tsp; 2588 
	struct l_timespec64 lts;
	int error;

	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec64(&uts, &lts);
		if (error != 0)
			return (error);
		tsp = &uts;
	} else
		tsp = NULL;
	error = linux_common_ppoll(td, args->fds, args->nfds, tsp,
	    args->sset, args->ssize);
	if (error != 0)
		return (error);
	if (tsp != NULL) {
		error = native_to_linux_timespec64(&lts, tsp);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

static int
linux_pollin(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error;
	u_int i;

	error = copyin(ufds, fds, nfd * sizeof(*fds));
	if (error != 0)
		return (error);

	for (i = 0; i < nfd; i++) {
		if (fds->events != 0)
			linux_to_bsd_poll_events(td, fds->fd,
			    fds->events, &fds->events);
		fds++;
	}
	return (0);
}

static int
linux_pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i, n = 0;

	for (i = 0; i < nfd; i++) {
		if (fds->revents != 0) {
			bsd_to_linux_poll_events(fds->revents,
			    &fds->revents);
			n++;
		}
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec lts;
	struct thread *tdt;
	int error;

	/*
	 * According to the manual page, EINVAL should be returned
	 * when an invalid pid is specified.
	 */
	if (uap->pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
	PROC_UNLOCK(tdt->td_proc);
	if (error != 0)
		return (error);
	error = native_to_linux_timespec(&lts, &ts);
	if (error != 0)
		return (error);
	return (copyout(&lts, uap->interval, sizeof(lts)));
}

/*
 * When the Linux thread is the initial thread in the thread group,
 * the thread id is equal to the process id.  Glibc depends on this
 * magic (assert in pthread_getattr_np.c).
 */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		tdt = td;
		PROC_LOCK(tdt->td_proc);
	} else if (tid > PID_MAX)
		tdt = tdfind(tid, pid);
	else {
		/*
		 * The initial thread, where the tid is equal to the pid.
		 */
		p = pfind(tid);
		if (p != NULL) {
			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
				/*
				 * p is not a Linuxulator process.
				 */
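				/*
				 * Returning NULL here makes callers such as
				 * the sched_*() handlers above fail with
				 * ESRCH when pointed at a native FreeBSD
				 * process.
				 */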
2713 */ 2714 PROC_UNLOCK(p); 2715 return (NULL); 2716 } 2717 FOREACH_THREAD_IN_PROC(p, tdt) { 2718 em = em_find(tdt); 2719 if (tid == em->em_tid) 2720 return (tdt); 2721 } 2722 PROC_UNLOCK(p); 2723 } 2724 return (NULL); 2725 } 2726 2727 return (tdt); 2728 } 2729 2730 void 2731 linux_to_bsd_waitopts(int options, int *bsdopts) 2732 { 2733 2734 if (options & LINUX_WNOHANG) 2735 *bsdopts |= WNOHANG; 2736 if (options & LINUX_WUNTRACED) 2737 *bsdopts |= WUNTRACED; 2738 if (options & LINUX_WEXITED) 2739 *bsdopts |= WEXITED; 2740 if (options & LINUX_WCONTINUED) 2741 *bsdopts |= WCONTINUED; 2742 if (options & LINUX_WNOWAIT) 2743 *bsdopts |= WNOWAIT; 2744 2745 if (options & __WCLONE) 2746 *bsdopts |= WLINUXCLONE; 2747 } 2748 2749 int 2750 linux_getrandom(struct thread *td, struct linux_getrandom_args *args) 2751 { 2752 struct uio uio; 2753 struct iovec iov; 2754 int error; 2755 2756 if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM)) 2757 return (EINVAL); 2758 if (args->count > INT_MAX) 2759 args->count = INT_MAX; 2760 2761 iov.iov_base = args->buf; 2762 iov.iov_len = args->count; 2763 2764 uio.uio_iov = &iov; 2765 uio.uio_iovcnt = 1; 2766 uio.uio_resid = iov.iov_len; 2767 uio.uio_segflg = UIO_USERSPACE; 2768 uio.uio_rw = UIO_READ; 2769 uio.uio_td = td; 2770 2771 error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK); 2772 if (error == 0) 2773 td->td_retval[0] = args->count - uio.uio_resid; 2774 return (error); 2775 } 2776 2777 int 2778 linux_mincore(struct thread *td, struct linux_mincore_args *args) 2779 { 2780 2781 /* Needs to be page-aligned */ 2782 if (args->start & PAGE_MASK) 2783 return (EINVAL); 2784 return (kern_mincore(td, args->start, args->len, args->vec)); 2785 } 2786 2787 #define SYSLOG_TAG "<6>" 2788 2789 int 2790 linux_syslog(struct thread *td, struct linux_syslog_args *args) 2791 { 2792 char buf[128], *src, *dst; 2793 u_int seq; 2794 int buflen, error; 2795 2796 if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) { 2797 linux_msg(td, "syslog unsupported type 0x%x", args->type); 2798 return (EINVAL); 2799 } 2800 2801 if (args->len < 6) { 2802 td->td_retval[0] = 0; 2803 return (0); 2804 } 2805 2806 error = priv_check(td, PRIV_MSGBUF); 2807 if (error) 2808 return (error); 2809 2810 mtx_lock(&msgbuf_lock); 2811 msgbuf_peekbytes(msgbufp, NULL, 0, &seq); 2812 mtx_unlock(&msgbuf_lock); 2813 2814 dst = args->buf; 2815 error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG)); 2816 /* The -1 is to skip the trailing '\0'. 
	/* The -1 skips the trailing '\0'. */
	dst += sizeof(SYSLOG_TAG) - 1;

	while (error == 0) {
		mtx_lock(&msgbuf_lock);
		buflen = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq);
		mtx_unlock(&msgbuf_lock);

		if (buflen == 0)
			break;

		for (src = buf; src < buf + buflen && error == 0; src++) {
			if (*src == '\0')
				continue;

			if (dst >= args->buf + args->len)
				goto out;

			error = copyout(src, dst, 1);
			dst++;

			if (*src == '\n' && *(src + 1) != '<' &&
			    dst + sizeof(SYSLOG_TAG) < args->buf + args->len) {
				error = copyout(&SYSLOG_TAG,
				    dst, sizeof(SYSLOG_TAG));
				dst += sizeof(SYSLOG_TAG) - 1;
			}
		}
	}
out:
	td->td_retval[0] = dst - args->buf;
	return (error);
}

int
linux_getcpu(struct thread *td, struct linux_getcpu_args *args)
{
	int cpu, error, node;

	cpu = td->td_oncpu;	/* Make sure it doesn't change during copyout(9). */
	error = 0;
	node = cpuid_to_pcpu[cpu]->pc_domain;

	if (args->cpu != NULL)
		error = copyout(&cpu, args->cpu, sizeof(l_int));
	if (args->node != NULL)
		error = copyout(&node, args->node, sizeof(l_int));
	return (error);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_poll(struct thread *td, struct linux_poll_args *args)
{
	struct timespec ts, *tsp;

	if (args->timeout != INFTIM) {
		if (args->timeout < 0)
			return (EINVAL);
		ts.tv_sec = args->timeout / 1000;
		ts.tv_nsec = (args->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (linux_common_ppoll(td, args->fds, args->nfds,
	    tsp, NULL, 0));
}
#endif /* __i386__ || __amd64__ */
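/*
 * For the conversion above: a hypothetical poll(fds, nfds, 1500) call
 * yields ts = { .tv_sec = 1, .tv_nsec = 500000000 }, while a timeout of
 * INFTIM (-1) selects an indefinite wait (tsp == NULL) and any other
 * negative timeout is rejected with EINVAL.
 */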