/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/* DTrace init */
LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);

/* Linuxulator-global DTrace probes */
LIN_SDT_PROBE_DECLARE(locks, emul_lock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_lock,
    unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, unlock);

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/*
	 * The following are only present in newer Linux kernels.
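	 * We report zero for the high-memory counters and a mem_unit of
	 * one byte.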
	 */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	secs = args->secs;

	if (secs > INT_MAX)
		secs = INT_MAX;

	it.it_value.tv_sec = (long) secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	if (error)
		return (error);
	if (timevalisset(&old_it.it_value)) {
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return (0);
}

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *)new;
	if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	int locked, vfslocked;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	vfslocked = 0;
	locked = 0;
	vp = NULL;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | MPSAFE | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	vfslocked = NDHASGIANT(&ni);
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = 1;

	/* Writable? */
	if (vp->v_writecount) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	vp->v_vflag |= VV_TEXT;

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked) {
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
	}

	/* Release the temporary mapping. */
	if (a_out)
		kmem_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, sizeof(l_int) * 8);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
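			 * Linux updates the timeout argument in place,
			 * so the remaining time is copied back out to
			 * userland below.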
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return (error);
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr =
		    (caddr_t)((uintptr_t)args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = sys_munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
	bsd_args.len = (uintptr_t)args->len;
	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

	return (sys_msync(td, &bsd_args));
}

int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value.
 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK
 * auxiliary vector entry.
 */
#define CLK_TCK		100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_SLOCK(p);
		calcru(p, &utime, &stime);
		PROC_SUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
	strlcpy(utsname.machine, linux_platform, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utimes))
		printf(ARGS(utimes, "%s, *"), fname);
#endif

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

#ifdef DEBUG
	if (ldebug(futimesat))
		printf(ARGS(futimesat, "%s, *"), fname);
#endif

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_common_wait(struct thread *td, int pid, int *status,
    int options, struct rusage *ru)
{
	int error, tmpstat;

	error = kern_wait(td, pid, &tmpstat, options, ru);
	if (error)
		return (error);

	if (status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		error = copyout(&tmpstat, status, sizeof(int));
	}

	return (error);
}

int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	int options;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif
	/*
	 * This check is necessary because the option test in kern_wait
	 * doesn't work, since we mess with the options here.
	 */
	if (args->options & ~(WUNTRACED | WNOHANG | WCONTINUED | __WCLONE))
		return (EINVAL);

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	return (linux_common_wait(td, args->pid, args->status, options, NULL));
}

int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_open(td, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ?
	    AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

#ifdef DEBUG
	if (ldebug(mknodat))
		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
	if (args->per != 0)
		return (EINVAL);

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return
	    (sys_setpriority(td, &bsd_args));
}

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_TEMP, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcopysafe(p, newcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_TEMP);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
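	 * The copy loop below therefore reads the kernel group set
	 * starting at cr_groups[1].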
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_TEMP, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_TEMP);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_setscheduler_args bsd;

#ifdef DEBUG
	if
	    (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return (sys_sched_setscheduler(td, &bsd));
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sys_sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case
	REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix
 * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif

	if (linux_use26(td)) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getpid: emuldata not found.\n"));
		td->td_retval[0] = em->shared->group_pid;
	} else {
		td->td_retval[0] = td->td_proc->p_pid;
	}

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{
	struct linux_emuldata *em;
	struct proc *p, *pp;

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	if (!linux_use26(td)) {
		PROC_LOCK(td->td_proc);
		td->td_retval[0] = td->td_proc->p_pptr->p_pid;
		PROC_UNLOCK(td->td_proc);
		return (0);
	}

	em = em_find(td->td_proc, EMUL_DONTLOCK);

	KASSERT(em != NULL, ("getppid: process emuldata not found.\n"));

	/* find the group leader */
	p = pfind(em->shared->group_pid);

	if (p == NULL) {
#ifdef DEBUG
		printf(LMSG("parent process not found.\n"));
#endif
		return (0);
	}

	pp = p->p_pptr;		/* switch to parent */
	PROC_LOCK(pp);
	PROC_UNLOCK(p);

	/* if it's also a Linux process */
	if (pp->p_sysent == &elf_linux_sysvec) {
		em = em_find(pp, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getppid: parent emuldata not found.\n"));

		td->td_retval[0] = em->shared->group_pid;
	} else
		td->td_retval[0] = pp->p_pid;

	PROC_UNLOCK(pp);

	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

#ifdef DEBUG
	if (ldebug(getsid))
		printf(ARGS(getsid, "%i"), args->pid);
#endif

	bsd.pid = args->pid;
	return (sys_getsid(td, &bsd));
}

int
linux_nosys(struct thread *td,
    struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(sethostname))
		printf(ARGS(sethostname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(setdomainname))
		printf(ARGS(setdomainname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	em = em_find(td->td_proc, EMUL_DONTLOCK);
	if (em->shared->refs > 1) {
		EMUL_SHARED_WLOCK(&emul_shared_lock);
		em->shared->flags |= EMUL_SHARED_HASXSTAT;
		em->shared->xstat = W_EXITCODE(args->error_code, 0);
		EMUL_SHARED_WUNLOCK(&emul_shared_lock);
		if (linux_use26(td))
			linux_kill_threads(td, SIGKILL);
	}

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, W_EXITCODE(args->error_code, 0));

	return (0);
}

#define _LINUX_CAPABILITY_VERSION  0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	if (lucd.effective || lucd.permitted || lucd.inheritable) {
		linux_msg(td,
		    "capset effective=0x%x, permitted=0x%x, "
		    "inheritable=0x%x is not implemented",
		    (int)lucd.effective, (int)lucd.permitted,
		    (int)lucd.inheritable);
		return (EPERM);
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %d, %d, %d, %d"), args->option,
		    args->arg2, args->arg3, args->arg4, args->arg5);
#endif

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		EMUL_UNLOCK(&emul_lock);
		break;
	case LINUX_PR_GET_PDEATHSIG:
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		EMUL_UNLOCK(&emul_lock);
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure not to
		 * overflow the size a Linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct cpuset_getaffinity_args cga;

#ifdef DEBUG
	if (ldebug(sched_getaffinity))
		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	cga.level = CPU_LEVEL_WHICH;
	cga.which = CPU_WHICH_PID;
	cga.id = args->pid;
	cga.cpusetsize = sizeof(cpuset_t);
	cga.mask = (cpuset_t *) args->user_mask_ptr;

	if ((error = sys_cpuset_getaffinity(td, &cga)) == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 * Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct cpuset_setaffinity_args csa;

#ifdef DEBUG
	if (ldebug(sched_setaffinity))
		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	csa.level = CPU_LEVEL_WHICH;
	csa.which = CPU_WHICH_PID;
	csa.id = args->pid;
	csa.cpusetsize = sizeof(cpuset_t);
	csa.mask = (cpuset_t *) args->user_mask_ptr;

	return (sys_cpuset_setaffinity(td, &csa));
}