/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/* DTrace init */
LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);

/* Linuxulator-global DTrace probes */
LIN_SDT_PROBE_DECLARE(locks, emul_lock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_lock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, unlock);

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	secs = args->secs;

	if (secs > INT_MAX)
		secs = INT_MAX;

	it.it_value.tv_sec = (long) secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	if (error)
		return (error);
	if (timevalisset(&old_it.it_value)) {
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return (0);
}

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *)new;
	if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	int locked;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = 1;

	/* Writable? */
	if (vp->v_writecount) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCESS is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	VOP_SET_TEXT(vp);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0);

	/* Release the temporary mapping. */
	if (a_out)
		kmem_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, sizeof(l_int) * 8);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return (error);
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr =
		    (caddr_t)((uintptr_t)args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = sys_munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define	LINUX_MS_ASYNC		0x0001
#define	LINUX_MS_INVALIDATE	0x0002
#define	LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
	bsd_args.len = (uintptr_t)args->len;
	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

	return (sys_msync(td, &bsd_args));
}

int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value.
 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK
 * auxiliary vector entry.
 */
#define	CLK_TCK		100

#define	CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define	CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define	CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_SLOCK(p);
		calcru(p, &utime, &stime);
		PROC_SUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
	strlcpy(utsname.machine, linux_platform, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utimes))
		printf(ARGS(utimes, "%s, *"), fname);
#endif

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

#ifdef DEBUG
	if (ldebug(futimesat))
		printf(ARGS(futimesat, "%s, *"), fname);
#endif

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_common_wait(struct thread *td, int pid, int *status,
    int options, struct rusage *ru)
{
	int error, tmpstat;

	error = kern_wait(td, pid, &tmpstat, options, ru);
	if (error)
		return (error);

	if (status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		error = copyout(&tmpstat, status, sizeof(int));
	}

	return (error);
}

int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	int options;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif
	/*
	 * this is necessary because the test in kern_wait doesn't work
	 * because we mess with the options here
	 */
	if (args->options & ~(WUNTRACED | WNOHANG | WCONTINUED | __WCLONE))
		return (EINVAL);

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	return (linux_common_wait(td, args->pid, args->status, options, NULL));
}


int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_open(td, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

#ifdef DEBUG
	if (ldebug(mknodat))
		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
	if (args->per != 0)
		return (EINVAL);

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define	B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return (sys_setpriority(td, &bsd_args));
}

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_TEMP, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcopysafe(p, newcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_TEMP);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_TEMP, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_TEMP);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_setscheduler_args bsd;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return (sys_sched_setscheduler(td, &bsd));
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sys_sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define	REBOOT_CAD_ON	0x89abcdef
#define	REBOOT_CAD_OFF	0
#define	REBOOT_HALT	0xcdef0123
#define	REBOOT_RESTART	0x01234567
#define	REBOOT_RESTART2	0xA1B2C3D4
#define	REBOOT_POWEROFF	0x4321FEDC
#define	REBOOT_MAGIC1	0xfee1dead
#define	REBOOT_MAGIC2	0x28121969
#define	REBOOT_MAGIC2A	0x05121996
#define	REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}


/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix
 * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif

	if (linux_use26(td)) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getpid: emuldata not found.\n"));
		td->td_retval[0] = em->shared->group_pid;
	} else {
		td->td_retval[0] = td->td_proc->p_pid;
	}

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}


int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{
	struct linux_emuldata *em;
	struct proc *p, *pp;

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	if (!linux_use26(td)) {
		PROC_LOCK(td->td_proc);
		td->td_retval[0] = td->td_proc->p_pptr->p_pid;
		PROC_UNLOCK(td->td_proc);
		return (0);
	}

	em = em_find(td->td_proc, EMUL_DONTLOCK);

	KASSERT(em != NULL, ("getppid: process emuldata not found.\n"));

	/* find the group leader */
	p = pfind(em->shared->group_pid);

	if (p == NULL) {
#ifdef DEBUG
		printf(LMSG("parent process not found.\n"));
#endif
		return (0);
	}

	pp = p->p_pptr;		/* switch to parent */
	PROC_LOCK(pp);
	PROC_UNLOCK(p);

	/* if it's also a Linux process */
	if (pp->p_sysent == &elf_linux_sysvec) {
		em = em_find(pp, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getppid: parent emuldata not found.\n"));

		td->td_retval[0] = em->shared->group_pid;
	} else
		td->td_retval[0] = pp->p_pid;

	PROC_UNLOCK(pp);

	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}


int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

#ifdef DEBUG
	if (ldebug(getsid))
		printf(ARGS(getsid, "%i"), args->pid);
#endif

	bsd.pid = args->pid;
	return (sys_getsid(td, &bsd));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(sethostname))
		printf(ARGS(sethostname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(setdomainname))
		printf(ARGS(setdomainname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	em = em_find(td->td_proc, EMUL_DONTLOCK);
	if (em->shared->refs > 1) {
		EMUL_SHARED_WLOCK(&emul_shared_lock);
		em->shared->flags |= EMUL_SHARED_HASXSTAT;
		em->shared->xstat = W_EXITCODE(args->error_code, 0);
		EMUL_SHARED_WUNLOCK(&emul_shared_lock);
		if (linux_use26(td))
			linux_kill_threads(td, SIGKILL);
	}

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, W_EXITCODE(args->error_code, 0));

	return (0);
}

#define	_LINUX_CAPABILITY_VERSION	0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	if (lucd.effective || lucd.permitted || lucd.inheritable) {
		linux_msg(td,
		    "capset effective=0x%x, permitted=0x%x, "
		    "inheritable=0x%x is not implemented",
		    (int)lucd.effective, (int)lucd.permitted,
		    (int)lucd.inheritable);
		return (EPERM);
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %d, %d, %d, %d"), args->option,
		    args->arg2, args->arg3, args->arg4, args->arg5);
#endif

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		EMUL_UNLOCK(&emul_lock);
		break;
	case LINUX_PR_GET_PDEATHSIG:
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		EMUL_UNLOCK(&emul_lock);
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct cpuset_getaffinity_args cga;

#ifdef DEBUG
	if (ldebug(sched_getaffinity))
		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	cga.level = CPU_LEVEL_WHICH;
	cga.which = CPU_WHICH_PID;
	cga.id = args->pid;
	cga.cpusetsize = sizeof(cpuset_t);
	cga.mask = (cpuset_t *) args->user_mask_ptr;

	if ((error = sys_cpuset_getaffinity(td, &cga)) == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 * Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct cpuset_setaffinity_args csa;

#ifdef DEBUG
	if (ldebug(sched_setaffinity))
		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	csa.level = CPU_LEVEL_WHICH;
	csa.which = CPU_WHICH_PID;
	csa.id = args->pid;
	csa.cpusetsize = sizeof(cpuset_t);
	csa.mask = (cpuset_t *) args->user_mask_ptr;

	return (sys_cpuset_setaffinity(td, &csa));
}