1 /*- 2 * Copyright (c) 2002 Doug Rabson 3 * Copyright (c) 1994-1995 Søren Schmidt 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer 11 * in this position and unchanged. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_compat.h" 34 35 #include <sys/param.h> 36 #include <sys/blist.h> 37 #include <sys/fcntl.h> 38 #if defined(__i386__) 39 #include <sys/imgact_aout.h> 40 #endif 41 #include <sys/jail.h> 42 #include <sys/kernel.h> 43 #include <sys/limits.h> 44 #include <sys/lock.h> 45 #include <sys/malloc.h> 46 #include <sys/mman.h> 47 #include <sys/mount.h> 48 #include <sys/mutex.h> 49 #include <sys/namei.h> 50 #include <sys/priv.h> 51 #include <sys/proc.h> 52 #include <sys/reboot.h> 53 #include <sys/racct.h> 54 #include <sys/resourcevar.h> 55 #include <sys/sched.h> 56 #include <sys/signalvar.h> 57 #include <sys/stat.h> 58 #include <sys/syscallsubr.h> 59 #include <sys/sysctl.h> 60 #include <sys/sysproto.h> 61 #include <sys/systm.h> 62 #include <sys/time.h> 63 #include <sys/vmmeter.h> 64 #include <sys/vnode.h> 65 #include <sys/wait.h> 66 #include <sys/cpuset.h> 67 68 #include <security/mac/mac_framework.h> 69 70 #include <vm/vm.h> 71 #include <vm/pmap.h> 72 #include <vm/vm_kern.h> 73 #include <vm/vm_map.h> 74 #include <vm/vm_extern.h> 75 #include <vm/vm_object.h> 76 #include <vm/swap_pager.h> 77 78 #ifdef COMPAT_LINUX32 79 #include <machine/../linux32/linux.h> 80 #include <machine/../linux32/linux32_proto.h> 81 #else 82 #include <machine/../linux/linux.h> 83 #include <machine/../linux/linux_proto.h> 84 #endif 85 86 #include <compat/linux/linux_file.h> 87 #include <compat/linux/linux_mib.h> 88 #include <compat/linux/linux_signal.h> 89 #include <compat/linux/linux_util.h> 90 #include <compat/linux/linux_sysproto.h> 91 #include <compat/linux/linux_emul.h> 92 #include <compat/linux/linux_misc.h> 93 94 int stclohz; /* Statistics clock frequency */ 95 96 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = { 97 RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK, 98 RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE, 99 RLIMIT_MEMLOCK, RLIMIT_AS 100 }; 101 102 struct l_sysinfo { 103 
l_long uptime; /* Seconds since boot */ 104 l_ulong loads[3]; /* 1, 5, and 15 minute load averages */ 105 #define LINUX_SYSINFO_LOADS_SCALE 65536 106 l_ulong totalram; /* Total usable main memory size */ 107 l_ulong freeram; /* Available memory size */ 108 l_ulong sharedram; /* Amount of shared memory */ 109 l_ulong bufferram; /* Memory used by buffers */ 110 l_ulong totalswap; /* Total swap space size */ 111 l_ulong freeswap; /* swap space still available */ 112 l_ushort procs; /* Number of current processes */ 113 l_ushort pads; 114 l_ulong totalbig; 115 l_ulong freebig; 116 l_uint mem_unit; 117 char _f[20-2*sizeof(l_long)-sizeof(l_int)]; /* padding */ 118 }; 119 int 120 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args) 121 { 122 struct l_sysinfo sysinfo; 123 vm_object_t object; 124 int i, j; 125 struct timespec ts; 126 127 getnanouptime(&ts); 128 if (ts.tv_nsec != 0) 129 ts.tv_sec++; 130 sysinfo.uptime = ts.tv_sec; 131 132 /* Use the information from the mib to get our load averages */ 133 for (i = 0; i < 3; i++) 134 sysinfo.loads[i] = averunnable.ldavg[i] * 135 LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale; 136 137 sysinfo.totalram = physmem * PAGE_SIZE; 138 sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE; 139 140 sysinfo.sharedram = 0; 141 mtx_lock(&vm_object_list_mtx); 142 TAILQ_FOREACH(object, &vm_object_list, object_list) 143 if (object->shadow_count > 1) 144 sysinfo.sharedram += object->resident_page_count; 145 mtx_unlock(&vm_object_list_mtx); 146 147 sysinfo.sharedram *= PAGE_SIZE; 148 sysinfo.bufferram = 0; 149 150 swap_pager_status(&i, &j); 151 sysinfo.totalswap = i * PAGE_SIZE; 152 sysinfo.freeswap = (i - j) * PAGE_SIZE; 153 154 sysinfo.procs = nprocs; 155 156 /* The following are only present in newer Linux kernels. */ 157 sysinfo.totalbig = 0; 158 sysinfo.freebig = 0; 159 sysinfo.mem_unit = 1; 160 161 return (copyout(&sysinfo, args->info, sizeof(sysinfo))); 162 } 163 164 int 165 linux_alarm(struct thread *td, struct linux_alarm_args *args) 166 { 167 struct itimerval it, old_it; 168 u_int secs; 169 int error; 170 171 #ifdef DEBUG 172 if (ldebug(alarm)) 173 printf(ARGS(alarm, "%u"), args->secs); 174 #endif 175 176 secs = args->secs; 177 178 if (secs > INT_MAX) 179 secs = INT_MAX; 180 181 it.it_value.tv_sec = (long) secs; 182 it.it_value.tv_usec = 0; 183 it.it_interval.tv_sec = 0; 184 it.it_interval.tv_usec = 0; 185 error = kern_setitimer(td, ITIMER_REAL, &it, &old_it); 186 if (error) 187 return (error); 188 if (timevalisset(&old_it.it_value)) { 189 if (old_it.it_value.tv_usec != 0) 190 old_it.it_value.tv_sec++; 191 td->td_retval[0] = old_it.it_value.tv_sec; 192 } 193 return (0); 194 } 195 196 int 197 linux_brk(struct thread *td, struct linux_brk_args *args) 198 { 199 struct vmspace *vm = td->td_proc->p_vmspace; 200 vm_offset_t new, old; 201 struct obreak_args /* { 202 char * nsize; 203 } */ tmp; 204 205 #ifdef DEBUG 206 if (ldebug(brk)) 207 printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend); 208 #endif 209 old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize); 210 new = (vm_offset_t)args->dsend; 211 tmp.nsize = (char *)new; 212 if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp)) 213 td->td_retval[0] = (long)new; 214 else 215 td->td_retval[0] = (long)old; 216 217 return (0); 218 } 219 220 #if defined(__i386__) 221 /* XXX: what about amd64/linux32? 
*/ 222 223 int 224 linux_uselib(struct thread *td, struct linux_uselib_args *args) 225 { 226 struct nameidata ni; 227 struct vnode *vp; 228 struct exec *a_out; 229 struct vattr attr; 230 vm_offset_t vmaddr; 231 unsigned long file_offset; 232 unsigned long bss_size; 233 char *library; 234 ssize_t aresid; 235 int error; 236 int locked, vfslocked; 237 238 LCONVPATHEXIST(td, args->library, &library); 239 240 #ifdef DEBUG 241 if (ldebug(uselib)) 242 printf(ARGS(uselib, "%s"), library); 243 #endif 244 245 a_out = NULL; 246 vfslocked = 0; 247 locked = 0; 248 vp = NULL; 249 250 NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | MPSAFE | AUDITVNODE1, 251 UIO_SYSSPACE, library, td); 252 error = namei(&ni); 253 LFREEPATH(library); 254 if (error) 255 goto cleanup; 256 257 vp = ni.ni_vp; 258 vfslocked = NDHASGIANT(&ni); 259 NDFREE(&ni, NDF_ONLY_PNBUF); 260 261 /* 262 * From here on down, we have a locked vnode that must be unlocked. 263 * XXX: The code below largely duplicates exec_check_permissions(). 264 */ 265 locked = 1; 266 267 /* Writable? */ 268 if (vp->v_writecount) { 269 error = ETXTBSY; 270 goto cleanup; 271 } 272 273 /* Executable? */ 274 error = VOP_GETATTR(vp, &attr, td->td_ucred); 275 if (error) 276 goto cleanup; 277 278 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 279 ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { 280 /* EACCES is what exec(2) returns. */ 281 error = ENOEXEC; 282 goto cleanup; 283 } 284 285 /* Sensible size? */ 286 if (attr.va_size == 0) { 287 error = ENOEXEC; 288 goto cleanup; 289 } 290 291 /* Can we access it? */ 292 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 293 if (error) 294 goto cleanup; 295 296 /* 297 * XXX: This should use vn_open() so that it is properly authorized, 298 * and to reduce code redundancy all over the place here. 299 * XXX: Not really, it duplicates far more of exec_check_permissions() 300 * than vn_open(). 301 */ 302 #ifdef MAC 303 error = mac_vnode_check_open(td->td_ucred, vp, VREAD); 304 if (error) 305 goto cleanup; 306 #endif 307 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); 308 if (error) 309 goto cleanup; 310 311 /* Pull in executable header into exec_map */ 312 error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE, 313 VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0); 314 if (error) 315 goto cleanup; 316 317 /* Is it a Linux binary? */ 318 if (((a_out->a_magic >> 16) & 0xff) != 0x64) { 319 error = ENOEXEC; 320 goto cleanup; 321 } 322 323 /* 324 * While we are here, we should REALLY do some more checks 325 */ 326 327 /* Set file/virtual offset based on a.out variant. */ 328 switch ((int)(a_out->a_magic & 0xffff)) { 329 case 0413: /* ZMAGIC */ 330 file_offset = 1024; 331 break; 332 case 0314: /* QMAGIC */ 333 file_offset = 0; 334 break; 335 default: 336 error = ENOEXEC; 337 goto cleanup; 338 } 339 340 bss_size = round_page(a_out->a_bss); 341 342 /* Check various fields in header for validity/bounds. */ 343 if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) { 344 error = ENOEXEC; 345 goto cleanup; 346 } 347 348 /* text + data can't exceed file size */ 349 if (a_out->a_data + a_out->a_text > attr.va_size) { 350 error = EFAULT; 351 goto cleanup; 352 } 353 354 /* 355 * text/data/bss must not exceed limits 356 * XXX - this is not complete. it should check current usage PLUS 357 * the resources needed by this library.
358 */ 359 PROC_LOCK(td->td_proc); 360 if (a_out->a_text > maxtsiz || 361 a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA) || 362 racct_set(td->td_proc, RACCT_DATA, a_out->a_data + 363 bss_size) != 0) { 364 PROC_UNLOCK(td->td_proc); 365 error = ENOMEM; 366 goto cleanup; 367 } 368 PROC_UNLOCK(td->td_proc); 369 370 /* 371 * Prevent more writers. 372 * XXX: Note that if any of the VM operations fail below we don't 373 * clear this flag. 374 */ 375 vp->v_vflag |= VV_TEXT; 376 377 /* 378 * Lock no longer needed 379 */ 380 locked = 0; 381 VOP_UNLOCK(vp, 0); 382 VFS_UNLOCK_GIANT(vfslocked); 383 384 /* 385 * Check if file_offset page aligned. Currently we cannot handle 386 * misalinged file offsets, and so we read in the entire image 387 * (what a waste). 388 */ 389 if (file_offset & PAGE_MASK) { 390 #ifdef DEBUG 391 printf("uselib: Non page aligned binary %lu\n", file_offset); 392 #endif 393 /* Map text+data read/write/execute */ 394 395 /* a_entry is the load address and is page aligned */ 396 vmaddr = trunc_page(a_out->a_entry); 397 398 /* get anon user mapping, read+write+execute */ 399 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 400 &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL, 401 VM_PROT_ALL, 0); 402 if (error) 403 goto cleanup; 404 405 error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset, 406 a_out->a_text + a_out->a_data, UIO_USERSPACE, 0, 407 td->td_ucred, NOCRED, &aresid, td); 408 if (error != 0) 409 goto cleanup; 410 if (aresid != 0) { 411 error = ENOEXEC; 412 goto cleanup; 413 } 414 } else { 415 #ifdef DEBUG 416 printf("uselib: Page aligned binary %lu\n", file_offset); 417 #endif 418 /* 419 * for QMAGIC, a_entry is 20 bytes beyond the load address 420 * to skip the executable header 421 */ 422 vmaddr = trunc_page(a_out->a_entry); 423 424 /* 425 * Map it all into the process's space as a single 426 * copy-on-write "data" segment. 427 */ 428 error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr, 429 a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, 430 MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset); 431 if (error) 432 goto cleanup; 433 } 434 #ifdef DEBUG 435 printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0], 436 ((long *)vmaddr)[1]); 437 #endif 438 if (bss_size != 0) { 439 /* Calculate BSS start address */ 440 vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + 441 a_out->a_data; 442 443 /* allocate some 'anon' space */ 444 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 445 &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0); 446 if (error) 447 goto cleanup; 448 } 449 450 cleanup: 451 /* Unlock vnode if needed */ 452 if (locked) { 453 VOP_UNLOCK(vp, 0); 454 VFS_UNLOCK_GIANT(vfslocked); 455 } 456 457 /* Release the temporary mapping. */ 458 if (a_out) 459 kmem_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE); 460 461 return (error); 462 } 463 464 #endif /* __i386__ */ 465 466 int 467 linux_select(struct thread *td, struct linux_select_args *args) 468 { 469 l_timeval ltv; 470 struct timeval tv0, tv1, utv, *tvp; 471 int error; 472 473 #ifdef DEBUG 474 if (ldebug(select)) 475 printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds, 476 (void *)args->readfds, (void *)args->writefds, 477 (void *)args->exceptfds, (void *)args->timeout); 478 #endif 479 480 /* 481 * Store current time for computation of the amount of 482 * time left. 
483 */ 484 if (args->timeout) { 485 if ((error = copyin(args->timeout, <v, sizeof(ltv)))) 486 goto select_out; 487 utv.tv_sec = ltv.tv_sec; 488 utv.tv_usec = ltv.tv_usec; 489 #ifdef DEBUG 490 if (ldebug(select)) 491 printf(LMSG("incoming timeout (%jd/%ld)"), 492 (intmax_t)utv.tv_sec, utv.tv_usec); 493 #endif 494 495 if (itimerfix(&utv)) { 496 /* 497 * The timeval was invalid. Convert it to something 498 * valid that will act as it does under Linux. 499 */ 500 utv.tv_sec += utv.tv_usec / 1000000; 501 utv.tv_usec %= 1000000; 502 if (utv.tv_usec < 0) { 503 utv.tv_sec -= 1; 504 utv.tv_usec += 1000000; 505 } 506 if (utv.tv_sec < 0) 507 timevalclear(&utv); 508 } 509 microtime(&tv0); 510 tvp = &utv; 511 } else 512 tvp = NULL; 513 514 error = kern_select(td, args->nfds, args->readfds, args->writefds, 515 args->exceptfds, tvp, sizeof(l_int) * 8); 516 517 #ifdef DEBUG 518 if (ldebug(select)) 519 printf(LMSG("real select returns %d"), error); 520 #endif 521 if (error) 522 goto select_out; 523 524 if (args->timeout) { 525 if (td->td_retval[0]) { 526 /* 527 * Compute how much time was left of the timeout, 528 * by subtracting the current time and the time 529 * before we started the call, and subtracting 530 * that result from the user-supplied value. 531 */ 532 microtime(&tv1); 533 timevalsub(&tv1, &tv0); 534 timevalsub(&utv, &tv1); 535 if (utv.tv_sec < 0) 536 timevalclear(&utv); 537 } else 538 timevalclear(&utv); 539 #ifdef DEBUG 540 if (ldebug(select)) 541 printf(LMSG("outgoing timeout (%jd/%ld)"), 542 (intmax_t)utv.tv_sec, utv.tv_usec); 543 #endif 544 ltv.tv_sec = utv.tv_sec; 545 ltv.tv_usec = utv.tv_usec; 546 if ((error = copyout(<v, args->timeout, sizeof(ltv)))) 547 goto select_out; 548 } 549 550 select_out: 551 #ifdef DEBUG 552 if (ldebug(select)) 553 printf(LMSG("select_out -> %d"), error); 554 #endif 555 return (error); 556 } 557 558 int 559 linux_mremap(struct thread *td, struct linux_mremap_args *args) 560 { 561 struct munmap_args /* { 562 void *addr; 563 size_t len; 564 } */ bsd_args; 565 int error = 0; 566 567 #ifdef DEBUG 568 if (ldebug(mremap)) 569 printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"), 570 (void *)(uintptr_t)args->addr, 571 (unsigned long)args->old_len, 572 (unsigned long)args->new_len, 573 (unsigned long)args->flags); 574 #endif 575 576 if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) { 577 td->td_retval[0] = 0; 578 return (EINVAL); 579 } 580 581 /* 582 * Check for the page alignment. 583 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK. 584 */ 585 if (args->addr & PAGE_MASK) { 586 td->td_retval[0] = 0; 587 return (EINVAL); 588 } 589 590 args->new_len = round_page(args->new_len); 591 args->old_len = round_page(args->old_len); 592 593 if (args->new_len > args->old_len) { 594 td->td_retval[0] = 0; 595 return (ENOMEM); 596 } 597 598 if (args->new_len < args->old_len) { 599 bsd_args.addr = 600 (caddr_t)((uintptr_t)args->addr + args->new_len); 601 bsd_args.len = args->old_len - args->new_len; 602 error = sys_munmap(td, &bsd_args); 603 } 604 605 td->td_retval[0] = error ? 
0 : (uintptr_t)args->addr; 606 return (error); 607 } 608 609 #define LINUX_MS_ASYNC 0x0001 610 #define LINUX_MS_INVALIDATE 0x0002 611 #define LINUX_MS_SYNC 0x0004 612 613 int 614 linux_msync(struct thread *td, struct linux_msync_args *args) 615 { 616 struct msync_args bsd_args; 617 618 bsd_args.addr = (caddr_t)(uintptr_t)args->addr; 619 bsd_args.len = (uintptr_t)args->len; 620 bsd_args.flags = args->fl & ~LINUX_MS_SYNC; 621 622 return (sys_msync(td, &bsd_args)); 623 } 624 625 int 626 linux_time(struct thread *td, struct linux_time_args *args) 627 { 628 struct timeval tv; 629 l_time_t tm; 630 int error; 631 632 #ifdef DEBUG 633 if (ldebug(time)) 634 printf(ARGS(time, "*")); 635 #endif 636 637 microtime(&tv); 638 tm = tv.tv_sec; 639 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 640 return (error); 641 td->td_retval[0] = tm; 642 return (0); 643 } 644 645 struct l_times_argv { 646 l_clock_t tms_utime; 647 l_clock_t tms_stime; 648 l_clock_t tms_cutime; 649 l_clock_t tms_cstime; 650 }; 651 652 653 /* 654 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value. 655 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK 656 * auxiliary vector entry. 657 */ 658 #define CLK_TCK 100 659 660 #define CONVOTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 661 #define CONVNTCK(r) (r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz)) 662 663 #define CONVTCK(r) (linux_kernver(td) >= LINUX_KERNVER_2004000 ? \ 664 CONVNTCK(r) : CONVOTCK(r)) 665 666 int 667 linux_times(struct thread *td, struct linux_times_args *args) 668 { 669 struct timeval tv, utime, stime, cutime, cstime; 670 struct l_times_argv tms; 671 struct proc *p; 672 int error; 673 674 #ifdef DEBUG 675 if (ldebug(times)) 676 printf(ARGS(times, "*")); 677 #endif 678 679 if (args->buf != NULL) { 680 p = td->td_proc; 681 PROC_LOCK(p); 682 PROC_SLOCK(p); 683 calcru(p, &utime, &stime); 684 PROC_SUNLOCK(p); 685 calccru(p, &cutime, &cstime); 686 PROC_UNLOCK(p); 687 688 tms.tms_utime = CONVTCK(utime); 689 tms.tms_stime = CONVTCK(stime); 690 691 tms.tms_cutime = CONVTCK(cutime); 692 tms.tms_cstime = CONVTCK(cstime); 693 694 if ((error = copyout(&tms, args->buf, sizeof(tms)))) 695 return (error); 696 } 697 698 microuptime(&tv); 699 td->td_retval[0] = (int)CONVTCK(tv); 700 return (0); 701 } 702 703 int 704 linux_newuname(struct thread *td, struct linux_newuname_args *args) 705 { 706 struct l_new_utsname utsname; 707 char osname[LINUX_MAX_UTSNAME]; 708 char osrelease[LINUX_MAX_UTSNAME]; 709 char *p; 710 711 #ifdef DEBUG 712 if (ldebug(newuname)) 713 printf(ARGS(newuname, "*")); 714 #endif 715 716 linux_get_osname(td, osname); 717 linux_get_osrelease(td, osrelease); 718 719 bzero(&utsname, sizeof(utsname)); 720 strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME); 721 getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME); 722 getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME); 723 strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME); 724 strlcpy(utsname.version, version, LINUX_MAX_UTSNAME); 725 for (p = utsname.version; *p != '\0'; ++p) 726 if (*p == '\n') { 727 *p = '\0'; 728 break; 729 } 730 strlcpy(utsname.machine, linux_platform, LINUX_MAX_UTSNAME); 731 732 return (copyout(&utsname, args->buf, sizeof(utsname))); 733 } 734 735 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 736 struct l_utimbuf { 737 l_time_t l_actime; 738 l_time_t l_modtime; 739 }; 740 741 int 742 linux_utime(struct thread *td, struct linux_utime_args *args) 743 { 744 struct 
timeval tv[2], *tvp; 745 struct l_utimbuf lut; 746 char *fname; 747 int error; 748 749 LCONVPATHEXIST(td, args->fname, &fname); 750 751 #ifdef DEBUG 752 if (ldebug(utime)) 753 printf(ARGS(utime, "%s, *"), fname); 754 #endif 755 756 if (args->times) { 757 if ((error = copyin(args->times, &lut, sizeof lut))) { 758 LFREEPATH(fname); 759 return (error); 760 } 761 tv[0].tv_sec = lut.l_actime; 762 tv[0].tv_usec = 0; 763 tv[1].tv_sec = lut.l_modtime; 764 tv[1].tv_usec = 0; 765 tvp = tv; 766 } else 767 tvp = NULL; 768 769 error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 770 LFREEPATH(fname); 771 return (error); 772 } 773 774 int 775 linux_utimes(struct thread *td, struct linux_utimes_args *args) 776 { 777 l_timeval ltv[2]; 778 struct timeval tv[2], *tvp = NULL; 779 char *fname; 780 int error; 781 782 LCONVPATHEXIST(td, args->fname, &fname); 783 784 #ifdef DEBUG 785 if (ldebug(utimes)) 786 printf(ARGS(utimes, "%s, *"), fname); 787 #endif 788 789 if (args->tptr != NULL) { 790 if ((error = copyin(args->tptr, ltv, sizeof ltv))) { 791 LFREEPATH(fname); 792 return (error); 793 } 794 tv[0].tv_sec = ltv[0].tv_sec; 795 tv[0].tv_usec = ltv[0].tv_usec; 796 tv[1].tv_sec = ltv[1].tv_sec; 797 tv[1].tv_usec = ltv[1].tv_usec; 798 tvp = tv; 799 } 800 801 error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 802 LFREEPATH(fname); 803 return (error); 804 } 805 806 int 807 linux_futimesat(struct thread *td, struct linux_futimesat_args *args) 808 { 809 l_timeval ltv[2]; 810 struct timeval tv[2], *tvp = NULL; 811 char *fname; 812 int error, dfd; 813 814 dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; 815 LCONVPATHEXIST_AT(td, args->filename, &fname, dfd); 816 817 #ifdef DEBUG 818 if (ldebug(futimesat)) 819 printf(ARGS(futimesat, "%s, *"), fname); 820 #endif 821 822 if (args->utimes != NULL) { 823 if ((error = copyin(args->utimes, ltv, sizeof ltv))) { 824 LFREEPATH(fname); 825 return (error); 826 } 827 tv[0].tv_sec = ltv[0].tv_sec; 828 tv[0].tv_usec = ltv[0].tv_usec; 829 tv[1].tv_sec = ltv[1].tv_sec; 830 tv[1].tv_usec = ltv[1].tv_usec; 831 tvp = tv; 832 } 833 834 error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 835 LFREEPATH(fname); 836 return (error); 837 } 838 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 839 840 int 841 linux_common_wait(struct thread *td, int pid, int *status, 842 int options, struct rusage *ru) 843 { 844 int error, tmpstat; 845 846 error = kern_wait(td, pid, &tmpstat, options, ru); 847 if (error) 848 return (error); 849 850 if (status) { 851 tmpstat &= 0xffff; 852 if (WIFSIGNALED(tmpstat)) 853 tmpstat = (tmpstat & 0xffffff80) | 854 BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat)); 855 else if (WIFSTOPPED(tmpstat)) 856 tmpstat = (tmpstat & 0xffff00ff) | 857 (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8); 858 error = copyout(&tmpstat, status, sizeof(int)); 859 } 860 861 return (error); 862 } 863 864 int 865 linux_waitpid(struct thread *td, struct linux_waitpid_args *args) 866 { 867 int options; 868 869 #ifdef DEBUG 870 if (ldebug(waitpid)) 871 printf(ARGS(waitpid, "%d, %p, %d"), 872 args->pid, (void *)args->status, args->options); 873 #endif 874 /* 875 * this is necessary because the test in kern_wait doesn't work 876 * because we mess with the options here 877 */ 878 if (args->options & ~(WUNTRACED | WNOHANG | WCONTINUED | __WCLONE)) 879 return (EINVAL); 880 881 options = (args->options & (WNOHANG | WUNTRACED)); 882 /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ 883 if (args->options & __WCLONE) 884 options |= WLINUXCLONE; 
885 886 return (linux_common_wait(td, args->pid, args->status, options, NULL)); 887 } 888 889 890 int 891 linux_mknod(struct thread *td, struct linux_mknod_args *args) 892 { 893 char *path; 894 int error; 895 896 LCONVPATHCREAT(td, args->path, &path); 897 898 #ifdef DEBUG 899 if (ldebug(mknod)) 900 printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev); 901 #endif 902 903 switch (args->mode & S_IFMT) { 904 case S_IFIFO: 905 case S_IFSOCK: 906 error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode); 907 break; 908 909 case S_IFCHR: 910 case S_IFBLK: 911 error = kern_mknod(td, path, UIO_SYSSPACE, args->mode, 912 args->dev); 913 break; 914 915 case S_IFDIR: 916 error = EPERM; 917 break; 918 919 case 0: 920 args->mode |= S_IFREG; 921 /* FALLTHROUGH */ 922 case S_IFREG: 923 error = kern_open(td, path, UIO_SYSSPACE, 924 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 925 if (error == 0) 926 kern_close(td, td->td_retval[0]); 927 break; 928 929 default: 930 error = EINVAL; 931 break; 932 } 933 LFREEPATH(path); 934 return (error); 935 } 936 937 int 938 linux_mknodat(struct thread *td, struct linux_mknodat_args *args) 939 { 940 char *path; 941 int error, dfd; 942 943 dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; 944 LCONVPATHCREAT_AT(td, args->filename, &path, dfd); 945 946 #ifdef DEBUG 947 if (ldebug(mknodat)) 948 printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev); 949 #endif 950 951 switch (args->mode & S_IFMT) { 952 case S_IFIFO: 953 case S_IFSOCK: 954 error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode); 955 break; 956 957 case S_IFCHR: 958 case S_IFBLK: 959 error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode, 960 args->dev); 961 break; 962 963 case S_IFDIR: 964 error = EPERM; 965 break; 966 967 case 0: 968 args->mode |= S_IFREG; 969 /* FALLTHROUGH */ 970 case S_IFREG: 971 error = kern_openat(td, dfd, path, UIO_SYSSPACE, 972 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 973 if (error == 0) 974 kern_close(td, td->td_retval[0]); 975 break; 976 977 default: 978 error = EINVAL; 979 break; 980 } 981 LFREEPATH(path); 982 return (error); 983 } 984 985 /* 986 * UGH! This is just about the dumbest idea I've ever heard!! 987 */ 988 int 989 linux_personality(struct thread *td, struct linux_personality_args *args) 990 { 991 #ifdef DEBUG 992 if (ldebug(personality)) 993 printf(ARGS(personality, "%lu"), (unsigned long)args->per); 994 #endif 995 if (args->per != 0) 996 return (EINVAL); 997 998 /* Yes Jim, it's still a Linux... 
*/ 999 td->td_retval[0] = 0; 1000 return (0); 1001 } 1002 1003 struct l_itimerval { 1004 l_timeval it_interval; 1005 l_timeval it_value; 1006 }; 1007 1008 #define B2L_ITIMERVAL(bip, lip) \ 1009 (bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec; \ 1010 (bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec; \ 1011 (bip)->it_value.tv_sec = (lip)->it_value.tv_sec; \ 1012 (bip)->it_value.tv_usec = (lip)->it_value.tv_usec; 1013 1014 int 1015 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap) 1016 { 1017 int error; 1018 struct l_itimerval ls; 1019 struct itimerval aitv, oitv; 1020 1021 #ifdef DEBUG 1022 if (ldebug(setitimer)) 1023 printf(ARGS(setitimer, "%p, %p"), 1024 (void *)uap->itv, (void *)uap->oitv); 1025 #endif 1026 1027 if (uap->itv == NULL) { 1028 uap->itv = uap->oitv; 1029 return (linux_getitimer(td, (struct linux_getitimer_args *)uap)); 1030 } 1031 1032 error = copyin(uap->itv, &ls, sizeof(ls)); 1033 if (error != 0) 1034 return (error); 1035 B2L_ITIMERVAL(&aitv, &ls); 1036 #ifdef DEBUG 1037 if (ldebug(setitimer)) { 1038 printf("setitimer: value: sec: %jd, usec: %ld\n", 1039 (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec); 1040 printf("setitimer: interval: sec: %jd, usec: %ld\n", 1041 (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec); 1042 } 1043 #endif 1044 error = kern_setitimer(td, uap->which, &aitv, &oitv); 1045 if (error != 0 || uap->oitv == NULL) 1046 return (error); 1047 B2L_ITIMERVAL(&ls, &oitv); 1048 1049 return (copyout(&ls, uap->oitv, sizeof(ls))); 1050 } 1051 1052 int 1053 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap) 1054 { 1055 int error; 1056 struct l_itimerval ls; 1057 struct itimerval aitv; 1058 1059 #ifdef DEBUG 1060 if (ldebug(getitimer)) 1061 printf(ARGS(getitimer, "%p"), (void *)uap->itv); 1062 #endif 1063 error = kern_getitimer(td, uap->which, &aitv); 1064 if (error != 0) 1065 return (error); 1066 B2L_ITIMERVAL(&ls, &aitv); 1067 return (copyout(&ls, uap->itv, sizeof(ls))); 1068 } 1069 1070 int 1071 linux_nice(struct thread *td, struct linux_nice_args *args) 1072 { 1073 struct setpriority_args bsd_args; 1074 1075 bsd_args.which = PRIO_PROCESS; 1076 bsd_args.who = 0; /* current process */ 1077 bsd_args.prio = args->inc; 1078 return (sys_setpriority(td, &bsd_args)); 1079 } 1080 1081 int 1082 linux_setgroups(struct thread *td, struct linux_setgroups_args *args) 1083 { 1084 struct ucred *newcred, *oldcred; 1085 l_gid_t *linux_gidset; 1086 gid_t *bsd_gidset; 1087 int ngrp, error; 1088 struct proc *p; 1089 1090 ngrp = args->gidsetsize; 1091 if (ngrp < 0 || ngrp >= ngroups_max + 1) 1092 return (EINVAL); 1093 linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_TEMP, M_WAITOK); 1094 error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t)); 1095 if (error) 1096 goto out; 1097 newcred = crget(); 1098 p = td->td_proc; 1099 PROC_LOCK(p); 1100 oldcred = crcopysafe(p, newcred); 1101 1102 /* 1103 * cr_groups[0] holds egid. Setting the whole set from 1104 * the supplied set will cause egid to be changed too. 1105 * Keep cr_groups[0] unchanged to prevent that. 
1106 */ 1107 1108 if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) { 1109 PROC_UNLOCK(p); 1110 crfree(newcred); 1111 goto out; 1112 } 1113 1114 if (ngrp > 0) { 1115 newcred->cr_ngroups = ngrp + 1; 1116 1117 bsd_gidset = newcred->cr_groups; 1118 ngrp--; 1119 while (ngrp >= 0) { 1120 bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; 1121 ngrp--; 1122 } 1123 } else 1124 newcred->cr_ngroups = 1; 1125 1126 setsugid(p); 1127 p->p_ucred = newcred; 1128 PROC_UNLOCK(p); 1129 crfree(oldcred); 1130 error = 0; 1131 out: 1132 free(linux_gidset, M_TEMP); 1133 return (error); 1134 } 1135 1136 int 1137 linux_getgroups(struct thread *td, struct linux_getgroups_args *args) 1138 { 1139 struct ucred *cred; 1140 l_gid_t *linux_gidset; 1141 gid_t *bsd_gidset; 1142 int bsd_gidsetsz, ngrp, error; 1143 1144 cred = td->td_ucred; 1145 bsd_gidset = cred->cr_groups; 1146 bsd_gidsetsz = cred->cr_ngroups - 1; 1147 1148 /* 1149 * cr_groups[0] holds egid. Returning the whole set 1150 * here will cause a duplicate. Exclude cr_groups[0] 1151 * to prevent that. 1152 */ 1153 1154 if ((ngrp = args->gidsetsize) == 0) { 1155 td->td_retval[0] = bsd_gidsetsz; 1156 return (0); 1157 } 1158 1159 if (ngrp < bsd_gidsetsz) 1160 return (EINVAL); 1161 1162 ngrp = 0; 1163 linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset), 1164 M_TEMP, M_WAITOK); 1165 while (ngrp < bsd_gidsetsz) { 1166 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1167 ngrp++; 1168 } 1169 1170 error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t)); 1171 free(linux_gidset, M_TEMP); 1172 if (error) 1173 return (error); 1174 1175 td->td_retval[0] = ngrp; 1176 return (0); 1177 } 1178 1179 int 1180 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1181 { 1182 struct rlimit bsd_rlim; 1183 struct l_rlimit rlim; 1184 u_int which; 1185 int error; 1186 1187 #ifdef DEBUG 1188 if (ldebug(setrlimit)) 1189 printf(ARGS(setrlimit, "%d, %p"), 1190 args->resource, (void *)args->rlim); 1191 #endif 1192 1193 if (args->resource >= LINUX_RLIM_NLIMITS) 1194 return (EINVAL); 1195 1196 which = linux_to_bsd_resource[args->resource]; 1197 if (which == -1) 1198 return (EINVAL); 1199 1200 error = copyin(args->rlim, &rlim, sizeof(rlim)); 1201 if (error) 1202 return (error); 1203 1204 bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; 1205 bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; 1206 return (kern_setrlimit(td, which, &bsd_rlim)); 1207 } 1208 1209 int 1210 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1211 { 1212 struct l_rlimit rlim; 1213 struct proc *p = td->td_proc; 1214 struct rlimit bsd_rlim; 1215 u_int which; 1216 1217 #ifdef DEBUG 1218 if (ldebug(old_getrlimit)) 1219 printf(ARGS(old_getrlimit, "%d, %p"), 1220 args->resource, (void *)args->rlim); 1221 #endif 1222 1223 if (args->resource >= LINUX_RLIM_NLIMITS) 1224 return (EINVAL); 1225 1226 which = linux_to_bsd_resource[args->resource]; 1227 if (which == -1) 1228 return (EINVAL); 1229 1230 PROC_LOCK(p); 1231 lim_rlimit(p, which, &bsd_rlim); 1232 PROC_UNLOCK(p); 1233 1234 #ifdef COMPAT_LINUX32 1235 rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur; 1236 if (rlim.rlim_cur == UINT_MAX) 1237 rlim.rlim_cur = INT_MAX; 1238 rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max; 1239 if (rlim.rlim_max == UINT_MAX) 1240 rlim.rlim_max = INT_MAX; 1241 #else 1242 rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; 1243 if (rlim.rlim_cur == ULONG_MAX) 1244 rlim.rlim_cur = LONG_MAX; 1245 rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; 1246 if (rlim.rlim_max == ULONG_MAX) 1247 rlim.rlim_max 
= LONG_MAX; 1248 #endif 1249 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1250 } 1251 1252 int 1253 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) 1254 { 1255 struct l_rlimit rlim; 1256 struct proc *p = td->td_proc; 1257 struct rlimit bsd_rlim; 1258 u_int which; 1259 1260 #ifdef DEBUG 1261 if (ldebug(getrlimit)) 1262 printf(ARGS(getrlimit, "%d, %p"), 1263 args->resource, (void *)args->rlim); 1264 #endif 1265 1266 if (args->resource >= LINUX_RLIM_NLIMITS) 1267 return (EINVAL); 1268 1269 which = linux_to_bsd_resource[args->resource]; 1270 if (which == -1) 1271 return (EINVAL); 1272 1273 PROC_LOCK(p); 1274 lim_rlimit(p, which, &bsd_rlim); 1275 PROC_UNLOCK(p); 1276 1277 rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur; 1278 rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max; 1279 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1280 } 1281 1282 int 1283 linux_sched_setscheduler(struct thread *td, 1284 struct linux_sched_setscheduler_args *args) 1285 { 1286 struct sched_setscheduler_args bsd; 1287 1288 #ifdef DEBUG 1289 if (ldebug(sched_setscheduler)) 1290 printf(ARGS(sched_setscheduler, "%d, %d, %p"), 1291 args->pid, args->policy, (const void *)args->param); 1292 #endif 1293 1294 switch (args->policy) { 1295 case LINUX_SCHED_OTHER: 1296 bsd.policy = SCHED_OTHER; 1297 break; 1298 case LINUX_SCHED_FIFO: 1299 bsd.policy = SCHED_FIFO; 1300 break; 1301 case LINUX_SCHED_RR: 1302 bsd.policy = SCHED_RR; 1303 break; 1304 default: 1305 return (EINVAL); 1306 } 1307 1308 bsd.pid = args->pid; 1309 bsd.param = (struct sched_param *)args->param; 1310 return (sys_sched_setscheduler(td, &bsd)); 1311 } 1312 1313 int 1314 linux_sched_getscheduler(struct thread *td, 1315 struct linux_sched_getscheduler_args *args) 1316 { 1317 struct sched_getscheduler_args bsd; 1318 int error; 1319 1320 #ifdef DEBUG 1321 if (ldebug(sched_getscheduler)) 1322 printf(ARGS(sched_getscheduler, "%d"), args->pid); 1323 #endif 1324 1325 bsd.pid = args->pid; 1326 error = sys_sched_getscheduler(td, &bsd); 1327 1328 switch (td->td_retval[0]) { 1329 case SCHED_OTHER: 1330 td->td_retval[0] = LINUX_SCHED_OTHER; 1331 break; 1332 case SCHED_FIFO: 1333 td->td_retval[0] = LINUX_SCHED_FIFO; 1334 break; 1335 case SCHED_RR: 1336 td->td_retval[0] = LINUX_SCHED_RR; 1337 break; 1338 } 1339 1340 return (error); 1341 } 1342 1343 int 1344 linux_sched_get_priority_max(struct thread *td, 1345 struct linux_sched_get_priority_max_args *args) 1346 { 1347 struct sched_get_priority_max_args bsd; 1348 1349 #ifdef DEBUG 1350 if (ldebug(sched_get_priority_max)) 1351 printf(ARGS(sched_get_priority_max, "%d"), args->policy); 1352 #endif 1353 1354 switch (args->policy) { 1355 case LINUX_SCHED_OTHER: 1356 bsd.policy = SCHED_OTHER; 1357 break; 1358 case LINUX_SCHED_FIFO: 1359 bsd.policy = SCHED_FIFO; 1360 break; 1361 case LINUX_SCHED_RR: 1362 bsd.policy = SCHED_RR; 1363 break; 1364 default: 1365 return (EINVAL); 1366 } 1367 return (sys_sched_get_priority_max(td, &bsd)); 1368 } 1369 1370 int 1371 linux_sched_get_priority_min(struct thread *td, 1372 struct linux_sched_get_priority_min_args *args) 1373 { 1374 struct sched_get_priority_min_args bsd; 1375 1376 #ifdef DEBUG 1377 if (ldebug(sched_get_priority_min)) 1378 printf(ARGS(sched_get_priority_min, "%d"), args->policy); 1379 #endif 1380 1381 switch (args->policy) { 1382 case LINUX_SCHED_OTHER: 1383 bsd.policy = SCHED_OTHER; 1384 break; 1385 case LINUX_SCHED_FIFO: 1386 bsd.policy = SCHED_FIFO; 1387 break; 1388 case LINUX_SCHED_RR: 1389 bsd.policy = SCHED_RR; 1390 break; 1391 default: 1392 return (EINVAL); 
1393 } 1394 return (sys_sched_get_priority_min(td, &bsd)); 1395 } 1396 1397 #define REBOOT_CAD_ON 0x89abcdef 1398 #define REBOOT_CAD_OFF 0 1399 #define REBOOT_HALT 0xcdef0123 1400 #define REBOOT_RESTART 0x01234567 1401 #define REBOOT_RESTART2 0xA1B2C3D4 1402 #define REBOOT_POWEROFF 0x4321FEDC 1403 #define REBOOT_MAGIC1 0xfee1dead 1404 #define REBOOT_MAGIC2 0x28121969 1405 #define REBOOT_MAGIC2A 0x05121996 1406 #define REBOOT_MAGIC2B 0x16041998 1407 1408 int 1409 linux_reboot(struct thread *td, struct linux_reboot_args *args) 1410 { 1411 struct reboot_args bsd_args; 1412 1413 #ifdef DEBUG 1414 if (ldebug(reboot)) 1415 printf(ARGS(reboot, "0x%x"), args->cmd); 1416 #endif 1417 1418 if (args->magic1 != REBOOT_MAGIC1) 1419 return (EINVAL); 1420 1421 switch (args->magic2) { 1422 case REBOOT_MAGIC2: 1423 case REBOOT_MAGIC2A: 1424 case REBOOT_MAGIC2B: 1425 break; 1426 default: 1427 return (EINVAL); 1428 } 1429 1430 switch (args->cmd) { 1431 case REBOOT_CAD_ON: 1432 case REBOOT_CAD_OFF: 1433 return (priv_check(td, PRIV_REBOOT)); 1434 case REBOOT_HALT: 1435 bsd_args.opt = RB_HALT; 1436 break; 1437 case REBOOT_RESTART: 1438 case REBOOT_RESTART2: 1439 bsd_args.opt = 0; 1440 break; 1441 case REBOOT_POWEROFF: 1442 bsd_args.opt = RB_POWEROFF; 1443 break; 1444 default: 1445 return (EINVAL); 1446 } 1447 return (sys_reboot(td, &bsd_args)); 1448 } 1449 1450 1451 /* 1452 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify 1453 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that 1454 * are assumed to be preserved. The following lightweight syscalls fix 1455 * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c 1456 * 1457 * linux_getpid() - MP SAFE 1458 * linux_getgid() - MP SAFE 1459 * linux_getuid() - MP SAFE 1460 */ 1461 1462 int 1463 linux_getpid(struct thread *td, struct linux_getpid_args *args) 1464 { 1465 struct linux_emuldata *em; 1466 1467 #ifdef DEBUG 1468 if (ldebug(getpid)) 1469 printf(ARGS(getpid, "")); 1470 #endif 1471 1472 if (linux_use26(td)) { 1473 em = em_find(td->td_proc, EMUL_DONTLOCK); 1474 KASSERT(em != NULL, ("getpid: emuldata not found.\n")); 1475 td->td_retval[0] = em->shared->group_pid; 1476 } else { 1477 td->td_retval[0] = td->td_proc->p_pid; 1478 } 1479 1480 return (0); 1481 } 1482 1483 int 1484 linux_gettid(struct thread *td, struct linux_gettid_args *args) 1485 { 1486 1487 #ifdef DEBUG 1488 if (ldebug(gettid)) 1489 printf(ARGS(gettid, "")); 1490 #endif 1491 1492 td->td_retval[0] = td->td_proc->p_pid; 1493 return (0); 1494 } 1495 1496 1497 int 1498 linux_getppid(struct thread *td, struct linux_getppid_args *args) 1499 { 1500 struct linux_emuldata *em; 1501 struct proc *p, *pp; 1502 1503 #ifdef DEBUG 1504 if (ldebug(getppid)) 1505 printf(ARGS(getppid, "")); 1506 #endif 1507 1508 if (!linux_use26(td)) { 1509 PROC_LOCK(td->td_proc); 1510 td->td_retval[0] = td->td_proc->p_pptr->p_pid; 1511 PROC_UNLOCK(td->td_proc); 1512 return (0); 1513 } 1514 1515 em = em_find(td->td_proc, EMUL_DONTLOCK); 1516 1517 KASSERT(em != NULL, ("getppid: process emuldata not found.\n")); 1518 1519 /* find the group leader */ 1520 p = pfind(em->shared->group_pid); 1521 1522 if (p == NULL) { 1523 #ifdef DEBUG 1524 printf(LMSG("parent process not found.\n")); 1525 #endif 1526 return (0); 1527 } 1528 1529 pp = p->p_pptr; /* switch to parent */ 1530 PROC_LOCK(pp); 1531 PROC_UNLOCK(p); 1532 1533 /* if it's also a Linux process */ 1534 if (pp->p_sysent == &elf_linux_sysvec) { 1535 em = em_find(pp, EMUL_DONTLOCK); 1536 KASSERT(em != NULL, ("getppid: parent
emuldata not found.\n")); 1537 1538 td->td_retval[0] = em->shared->group_pid; 1539 } else 1540 td->td_retval[0] = pp->p_pid; 1541 1542 PROC_UNLOCK(pp); 1543 1544 return (0); 1545 } 1546 1547 int 1548 linux_getgid(struct thread *td, struct linux_getgid_args *args) 1549 { 1550 1551 #ifdef DEBUG 1552 if (ldebug(getgid)) 1553 printf(ARGS(getgid, "")); 1554 #endif 1555 1556 td->td_retval[0] = td->td_ucred->cr_rgid; 1557 return (0); 1558 } 1559 1560 int 1561 linux_getuid(struct thread *td, struct linux_getuid_args *args) 1562 { 1563 1564 #ifdef DEBUG 1565 if (ldebug(getuid)) 1566 printf(ARGS(getuid, "")); 1567 #endif 1568 1569 td->td_retval[0] = td->td_ucred->cr_ruid; 1570 return (0); 1571 } 1572 1573 1574 int 1575 linux_getsid(struct thread *td, struct linux_getsid_args *args) 1576 { 1577 struct getsid_args bsd; 1578 1579 #ifdef DEBUG 1580 if (ldebug(getsid)) 1581 printf(ARGS(getsid, "%i"), args->pid); 1582 #endif 1583 1584 bsd.pid = args->pid; 1585 return (sys_getsid(td, &bsd)); 1586 } 1587 1588 int 1589 linux_nosys(struct thread *td, struct nosys_args *ignore) 1590 { 1591 1592 return (ENOSYS); 1593 } 1594 1595 int 1596 linux_getpriority(struct thread *td, struct linux_getpriority_args *args) 1597 { 1598 struct getpriority_args bsd_args; 1599 int error; 1600 1601 #ifdef DEBUG 1602 if (ldebug(getpriority)) 1603 printf(ARGS(getpriority, "%i, %i"), args->which, args->who); 1604 #endif 1605 1606 bsd_args.which = args->which; 1607 bsd_args.who = args->who; 1608 error = sys_getpriority(td, &bsd_args); 1609 td->td_retval[0] = 20 - td->td_retval[0]; 1610 return (error); 1611 } 1612 1613 int 1614 linux_sethostname(struct thread *td, struct linux_sethostname_args *args) 1615 { 1616 int name[2]; 1617 1618 #ifdef DEBUG 1619 if (ldebug(sethostname)) 1620 printf(ARGS(sethostname, "*, %i"), args->len); 1621 #endif 1622 1623 name[0] = CTL_KERN; 1624 name[1] = KERN_HOSTNAME; 1625 return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname, 1626 args->len, 0, 0)); 1627 } 1628 1629 int 1630 linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args) 1631 { 1632 int name[2]; 1633 1634 #ifdef DEBUG 1635 if (ldebug(setdomainname)) 1636 printf(ARGS(setdomainname, "*, %i"), args->len); 1637 #endif 1638 1639 name[0] = CTL_KERN; 1640 name[1] = KERN_NISDOMAINNAME; 1641 return (userland_sysctl(td, name, 2, 0, 0, 0, args->name, 1642 args->len, 0, 0)); 1643 } 1644 1645 int 1646 linux_exit_group(struct thread *td, struct linux_exit_group_args *args) 1647 { 1648 struct linux_emuldata *em; 1649 1650 #ifdef DEBUG 1651 if (ldebug(exit_group)) 1652 printf(ARGS(exit_group, "%i"), args->error_code); 1653 #endif 1654 1655 em = em_find(td->td_proc, EMUL_DONTLOCK); 1656 if (em->shared->refs > 1) { 1657 EMUL_SHARED_WLOCK(&emul_shared_lock); 1658 em->shared->flags |= EMUL_SHARED_HASXSTAT; 1659 em->shared->xstat = W_EXITCODE(args->error_code, 0); 1660 EMUL_SHARED_WUNLOCK(&emul_shared_lock); 1661 if (linux_use26(td)) 1662 linux_kill_threads(td, SIGKILL); 1663 } 1664 1665 /* 1666 * XXX: we should send a signal to the parent if 1667 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?) 1668 * as it doesn't occur often.
1669 */ 1670 exit1(td, W_EXITCODE(args->error_code, 0)); 1671 1672 return (0); 1673 } 1674 1675 #define _LINUX_CAPABILITY_VERSION 0x19980330 1676 1677 struct l_user_cap_header { 1678 l_int version; 1679 l_int pid; 1680 }; 1681 1682 struct l_user_cap_data { 1683 l_int effective; 1684 l_int permitted; 1685 l_int inheritable; 1686 }; 1687 1688 int 1689 linux_capget(struct thread *td, struct linux_capget_args *args) 1690 { 1691 struct l_user_cap_header luch; 1692 struct l_user_cap_data lucd; 1693 int error; 1694 1695 if (args->hdrp == NULL) 1696 return (EFAULT); 1697 1698 error = copyin(args->hdrp, &luch, sizeof(luch)); 1699 if (error != 0) 1700 return (error); 1701 1702 if (luch.version != _LINUX_CAPABILITY_VERSION) { 1703 luch.version = _LINUX_CAPABILITY_VERSION; 1704 error = copyout(&luch, args->hdrp, sizeof(luch)); 1705 if (error) 1706 return (error); 1707 return (EINVAL); 1708 } 1709 1710 if (luch.pid) 1711 return (EPERM); 1712 1713 if (args->datap) { 1714 /* 1715 * The current implementation doesn't support setting 1716 * a capability (it's essentially a stub) so indicate 1717 * that no capabilities are currently set or available 1718 * to request. 1719 */ 1720 bzero (&lucd, sizeof(lucd)); 1721 error = copyout(&lucd, args->datap, sizeof(lucd)); 1722 } 1723 1724 return (error); 1725 } 1726 1727 int 1728 linux_capset(struct thread *td, struct linux_capset_args *args) 1729 { 1730 struct l_user_cap_header luch; 1731 struct l_user_cap_data lucd; 1732 int error; 1733 1734 if (args->hdrp == NULL || args->datap == NULL) 1735 return (EFAULT); 1736 1737 error = copyin(args->hdrp, &luch, sizeof(luch)); 1738 if (error != 0) 1739 return (error); 1740 1741 if (luch.version != _LINUX_CAPABILITY_VERSION) { 1742 luch.version = _LINUX_CAPABILITY_VERSION; 1743 error = copyout(&luch, args->hdrp, sizeof(luch)); 1744 if (error) 1745 return (error); 1746 return (EINVAL); 1747 } 1748 1749 if (luch.pid) 1750 return (EPERM); 1751 1752 error = copyin(args->datap, &lucd, sizeof(lucd)); 1753 if (error != 0) 1754 return (error); 1755 1756 /* We currently don't support setting any capabilities. 
*/ 1757 if (lucd.effective || lucd.permitted || lucd.inheritable) { 1758 linux_msg(td, 1759 "capset effective=0x%x, permitted=0x%x, " 1760 "inheritable=0x%x is not implemented", 1761 (int)lucd.effective, (int)lucd.permitted, 1762 (int)lucd.inheritable); 1763 return (EPERM); 1764 } 1765 1766 return (0); 1767 } 1768 1769 int 1770 linux_prctl(struct thread *td, struct linux_prctl_args *args) 1771 { 1772 int error = 0, max_size; 1773 struct proc *p = td->td_proc; 1774 char comm[LINUX_MAX_COMM_LEN]; 1775 struct linux_emuldata *em; 1776 int pdeath_signal; 1777 1778 #ifdef DEBUG 1779 if (ldebug(prctl)) 1780 printf(ARGS(prctl, "%d, %d, %d, %d, %d"), args->option, 1781 args->arg2, args->arg3, args->arg4, args->arg5); 1782 #endif 1783 1784 switch (args->option) { 1785 case LINUX_PR_SET_PDEATHSIG: 1786 if (!LINUX_SIG_VALID(args->arg2)) 1787 return (EINVAL); 1788 em = em_find(p, EMUL_DOLOCK); 1789 KASSERT(em != NULL, ("prctl: emuldata not found.\n")); 1790 em->pdeath_signal = args->arg2; 1791 EMUL_UNLOCK(&emul_lock); 1792 break; 1793 case LINUX_PR_GET_PDEATHSIG: 1794 em = em_find(p, EMUL_DOLOCK); 1795 KASSERT(em != NULL, ("prctl: emuldata not found.\n")); 1796 pdeath_signal = em->pdeath_signal; 1797 EMUL_UNLOCK(&emul_lock); 1798 error = copyout(&pdeath_signal, 1799 (void *)(register_t)args->arg2, 1800 sizeof(pdeath_signal)); 1801 break; 1802 case LINUX_PR_GET_KEEPCAPS: 1803 /* 1804 * Indicate that we always clear the effective and 1805 * permitted capability sets when the user id becomes 1806 * non-zero (actually the capability sets are simply 1807 * always zero in the current implementation). 1808 */ 1809 td->td_retval[0] = 0; 1810 break; 1811 case LINUX_PR_SET_KEEPCAPS: 1812 /* 1813 * Ignore requests to keep the effective and permitted 1814 * capability sets when the user id becomes non-zero. 1815 */ 1816 break; 1817 case LINUX_PR_SET_NAME: 1818 /* 1819 * To be on the safe side we need to make sure to not 1820 * overflow the size a linux program expects. We already 1821 * do this here in the copyin, so that we don't need to 1822 * check on copyout. 1823 */ 1824 max_size = MIN(sizeof(comm), sizeof(p->p_comm)); 1825 error = copyinstr((void *)(register_t)args->arg2, comm, 1826 max_size, NULL); 1827 1828 /* Linux silently truncates the name if it is too long. */ 1829 if (error == ENAMETOOLONG) { 1830 /* 1831 * XXX: copyinstr() isn't documented to populate the 1832 * array completely, so do a copyin() to be on the 1833 * safe side. This should be changed in case 1834 * copyinstr() is changed to guarantee this. 1835 */ 1836 error = copyin((void *)(register_t)args->arg2, comm, 1837 max_size - 1); 1838 comm[max_size - 1] = '\0'; 1839 } 1840 if (error) 1841 return (error); 1842 1843 PROC_LOCK(p); 1844 strlcpy(p->p_comm, comm, sizeof(p->p_comm)); 1845 PROC_UNLOCK(p); 1846 break; 1847 case LINUX_PR_GET_NAME: 1848 PROC_LOCK(p); 1849 strlcpy(comm, p->p_comm, sizeof(comm)); 1850 PROC_UNLOCK(p); 1851 error = copyout(comm, (void *)(register_t)args->arg2, 1852 strlen(comm) + 1); 1853 break; 1854 default: 1855 error = EINVAL; 1856 break; 1857 } 1858 1859 return (error); 1860 } 1861 1862 /* 1863 * Get affinity of a process. 
1864 */ 1865 int 1866 linux_sched_getaffinity(struct thread *td, 1867 struct linux_sched_getaffinity_args *args) 1868 { 1869 int error; 1870 struct cpuset_getaffinity_args cga; 1871 1872 #ifdef DEBUG 1873 if (ldebug(sched_getaffinity)) 1874 printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid, 1875 args->len); 1876 #endif 1877 if (args->len < sizeof(cpuset_t)) 1878 return (EINVAL); 1879 1880 cga.level = CPU_LEVEL_WHICH; 1881 cga.which = CPU_WHICH_PID; 1882 cga.id = args->pid; 1883 cga.cpusetsize = sizeof(cpuset_t); 1884 cga.mask = (cpuset_t *) args->user_mask_ptr; 1885 1886 if ((error = sys_cpuset_getaffinity(td, &cga)) == 0) 1887 td->td_retval[0] = sizeof(cpuset_t); 1888 1889 return (error); 1890 } 1891 1892 /* 1893 * Set affinity of a process. 1894 */ 1895 int 1896 linux_sched_setaffinity(struct thread *td, 1897 struct linux_sched_setaffinity_args *args) 1898 { 1899 struct cpuset_setaffinity_args csa; 1900 1901 #ifdef DEBUG 1902 if (ldebug(sched_setaffinity)) 1903 printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid, 1904 args->len); 1905 #endif 1906 if (args->len < sizeof(cpuset_t)) 1907 return (EINVAL); 1908 1909 csa.level = CPU_LEVEL_WHICH; 1910 csa.which = CPU_WHICH_PID; 1911 csa.id = args->pid; 1912 csa.cpusetsize = sizeof(cpuset_t); 1913 csa.mask = (cpuset_t *) args->user_mask_ptr; 1914 1915 return (sys_cpuset_setaffinity(td, &csa)); 1916 } 1917