/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/* DTrace init */
LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);

/* Linuxulator-global DTrace probes */
LIN_SDT_PROBE_DECLARE(locks, emul_lock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_lock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, unlock);

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
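
	/*
	 * Illustrative example of the scaling above, assuming a kernel
	 * fscale of 2048: a 1-minute load average of 0.85 is held as
	 * ldavg[0] == 1741 (0.85 * 2048) and reported to Linux as
	 * 1741 * 65536 / 2048 == 55712, i.e. 0.85 in the 16.16 fixed-point
	 * format (LINUX_SYSINFO_LOADS_SCALE) that Linux userland expects
	 * in sysinfo.loads[].
	 */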

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	secs = args->secs;

	if (secs > INT_MAX)
		secs = INT_MAX;

	it.it_value.tv_sec = (long) secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	if (error)
		return (error);
	if (timevalisset(&old_it.it_value)) {
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return (0);
}

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *)new;
	if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error, locked, writecount;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = 1;

	/* Writable? */
	error = VOP_GET_WRITECOUNT(vp, &writecount);
	if (error != 0)
		goto cleanup;
	if (writecount != 0) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	VOP_SET_TEXT(vp);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, sizeof(l_int) * 8);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return (error);
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr =
		    (caddr_t)((uintptr_t)args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = sys_munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
	bsd_args.len = (uintptr_t)args->len;
	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

	return (sys_msync(td, &bsd_args));
}

int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use a hard-coded CLK_TCK value.
 * Since 2.2.1 Glibc uses the value exported from the kernel via the
 * AT_CLKTCK auxiliary vector entry.
 */
#define CLK_TCK		100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))
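
/*
 * Worked example of the conversion above, using the hard-coded CLK_TCK
 * of 100: CONVOTCK() turns a struct timeval of { 2, 340000 } into
 * 2 * 100 + 340000 / (1000000 / 100) == 200 + 34 == 234 clock ticks.
 * CONVNTCK() performs the same conversion but divides the microseconds
 * by (1000000 / stclohz), i.e. it uses the statistics clock frequency
 * reported by the kernel instead of the legacy constant.
 */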

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_SLOCK(p);
		calcru(p, &utime, &stime);
		PROC_SUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
	strlcpy(utsname.machine, linux_platform, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utimes))
		printf(ARGS(utimes, "%s, *"), fname);
#endif

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

#ifdef DEBUG
	if (ldebug(futimesat))
		printf(ARGS(futimesat, "%s, *"), fname);
#endif

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_common_wait(struct thread *td, int pid, int *status,
    int options, struct rusage *ru)
{
	int error, tmpstat;

	error = kern_wait(td, pid, &tmpstat, options, ru);
	if (error)
		return (error);

	if (status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		error = copyout(&tmpstat, status, sizeof(int));
	}

	return (error);
}

int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	int options;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif
	/*
	 * This is necessary because the test in kern_wait doesn't work,
	 * because we mess with the options here.
	 */
	if (args->options & ~(WUNTRACED | WNOHANG | WCONTINUED | __WCLONE))
		return (EINVAL);

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	return (linux_common_wait(td, args->pid, args->status, options, NULL));
}

int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_open(td, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

#ifdef DEBUG
	if (ldebug(mknodat))
		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
	if (args->per != 0)
		return (EINVAL);

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return (sys_setpriority(td, &bsd_args));
}

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_TEMP, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcopysafe(p, newcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_TEMP);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_TEMP, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_TEMP);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_setscheduler_args bsd;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return (sys_sched_setscheduler(td, &bsd));
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sys_sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix
 * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif

	if (linux_use26(td)) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getpid: emuldata not found.\n"));
		td->td_retval[0] = em->shared->group_pid;
	} else {
		td->td_retval[0] = td->td_proc->p_pid;
	}

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{
	struct linux_emuldata *em;
	struct proc *p, *pp;

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	if (!linux_use26(td)) {
		PROC_LOCK(td->td_proc);
		td->td_retval[0] = td->td_proc->p_pptr->p_pid;
		PROC_UNLOCK(td->td_proc);
		return (0);
	}

	em = em_find(td->td_proc, EMUL_DONTLOCK);

	KASSERT(em != NULL, ("getppid: process emuldata not found.\n"));

	/* find the group leader */
	p = pfind(em->shared->group_pid);

	if (p == NULL) {
#ifdef DEBUG
		printf(LMSG("parent process not found.\n"));
#endif
		return (0);
	}

	pp = p->p_pptr;		/* switch to parent */
	PROC_LOCK(pp);
	PROC_UNLOCK(p);

	/* if it's also a Linux process */
	if (pp->p_sysent == &elf_linux_sysvec) {
		em = em_find(pp, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getppid: parent emuldata not found.\n"));

		td->td_retval[0] = em->shared->group_pid;
	} else
		td->td_retval[0] = pp->p_pid;

	PROC_UNLOCK(pp);

	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

#ifdef DEBUG
	if (ldebug(getsid))
		printf(ARGS(getsid, "%i"), args->pid);
#endif

	bsd.pid = args->pid;
	return (sys_getsid(td, &bsd));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(sethostname))
		printf(ARGS(sethostname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(setdomainname))
		printf(ARGS(setdomainname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	em = em_find(td->td_proc, EMUL_DONTLOCK);
	if (em->shared->refs > 1) {
		EMUL_SHARED_WLOCK(&emul_shared_lock);
		em->shared->flags |= EMUL_SHARED_HASXSTAT;
		em->shared->xstat = W_EXITCODE(args->error_code, 0);
		EMUL_SHARED_WUNLOCK(&emul_shared_lock);
		if (linux_use26(td))
			linux_kill_threads(td, SIGKILL);
	}

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, W_EXITCODE(args->error_code, 0));

	return (0);
}

#define _LINUX_CAPABILITY_VERSION	0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	if (lucd.effective || lucd.permitted || lucd.inheritable) {
		linux_msg(td,
		    "capset effective=0x%x, permitted=0x%x, "
		    "inheritable=0x%x is not implemented",
		    (int)lucd.effective, (int)lucd.permitted,
		    (int)lucd.inheritable);
		return (EPERM);
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %d, %d, %d, %d"), args->option,
		    args->arg2, args->arg3, args->arg4, args->arg5);
#endif

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		EMUL_UNLOCK(&emul_lock);
		break;
	case LINUX_PR_GET_PDEATHSIG:
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		EMUL_UNLOCK(&emul_lock);
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct cpuset_getaffinity_args cga;

#ifdef DEBUG
	if (ldebug(sched_getaffinity))
		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	cga.level = CPU_LEVEL_WHICH;
	cga.which = CPU_WHICH_PID;
	cga.id = args->pid;
	cga.cpusetsize = sizeof(cpuset_t);
	cga.mask = (cpuset_t *)args->user_mask_ptr;

	if ((error = sys_cpuset_getaffinity(td, &cga)) == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 * Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct cpuset_setaffinity_args csa;

#ifdef DEBUG
	if (ldebug(sched_setaffinity))
		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	csa.level = CPU_LEVEL_WHICH;
	csa.which = CPU_WHICH_PID;
	csa.id = args->pid;
	csa.cpusetsize = sizeof(cpuset_t);
	csa.mask = (cpuset_t *)args->user_mask_ptr;

	return (sys_cpuset_setaffinity(td, &csa));
}