/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__) || defined(__alpha__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#include <posix4/sched.h>

#include "opt_compat.h"

#if !COMPAT_LINUX32
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#else
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#endif

#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_util.h>

#ifdef __i386__
#include <machine/cputypes.h>
#endif

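/*
 * Convert a BSD signal number to its Linux equivalent.  On alpha the two
 * numberings are identical; elsewhere, signals within LINUX_SIGTBLSZ are
 * translated through the bsd_to_linux_signal[] table.
 */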
#ifdef __alpha__
#define BSD_TO_LINUX_SIGNAL(sig)	(sig)
#else
#define BSD_TO_LINUX_SIGNAL(sig) \
	(((sig) <= LINUX_SIGTBLSZ) ? bsd_to_linux_signal[_SIG_IDX(sig)] : sig)
#endif

#ifndef __alpha__
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, -1
};
#endif /*!__alpha__*/

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* Swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[6];		/* Pads structure to 64 bytes */
};
#ifndef __alpha__
int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	/* Uptime is copied out of print_uptime() in kern_shutdown.c */
	getnanouptime(&ts);
	i = 0;
	if (ts.tv_sec >= 86400) {
		ts.tv_sec %= 86400;
		i = 1;
	}
	if (i || ts.tv_sec >= 3600) {
		ts.tv_sec %= 3600;
		i = 1;
	}
	if (i || ts.tv_sec >= 60) {
		ts.tv_sec %= 60;
		i = 1;
	}
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return copyout(&sysinfo, args->info, sizeof(sysinfo));
}
#endif /*!__alpha__*/

#ifndef __alpha__
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	struct timeval tv;
	struct proc *p;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	if (args->secs > 100000000)
		return EINVAL;

	it.it_value.tv_sec = (long)args->secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	old_it = p->p_realtimer;
	getmicrouptime(&tv);
	if (timevalisset(&old_it.it_value))
		callout_stop(&p->p_itcallout);
	if (it.it_value.tv_sec != 0) {
		callout_reset(&p->p_itcallout, tvtohz(&it.it_value),
		    realitexpire, p);
		timevaladd(&it.it_value, &tv);
	}
	p->p_realtimer = it;
	PROC_UNLOCK(p);
	if (timevalcmp(&old_it.it_value, &tv, >)) {
		timevalsub(&old_it.it_value, &tv);
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return 0;
}
#endif /*!__alpha__*/

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *)new;
	if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return 0;
}

#if defined(__i386__) || defined(__alpha__)

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	vm_offset_t buffer;
	unsigned long bss_size;
	char *library;
	int error;
	int locked;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	/*
	 * XXX: This code should make use of vn_open(), rather than doing
	 * all this stuff itself.
	 */
	NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	/*
	 * XXX - This looks like a bogus check. A LOCKLEAF namei should not
	 * succeed without returning a vnode.
	 */
	if (vp == NULL) {
		error = ENOEXEC;	/* ?? */
		goto cleanup;
	}
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 */
	locked++;

	/* Writable? */
	if (vp->v_writecount) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred, td);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 */
#ifdef MAC
	error = mac_check_vnode_open(td->td_ucred, vp, FREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
	if (error)
		goto cleanup;

	/* Pull in executable header into kernel_map */
	error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0, td);

	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. It should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	mp_fixme("Unlocked vflags access.");
	/* prevent more writers */
	vp->v_vflag |= VV_TEXT;

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		/* map file into kernel_map */
		error = vm_mmap(kernel_map, &buffer,
		    round_page(a_out->a_text + a_out->a_data + file_offset),
		    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
		    trunc_page(file_offset));
		if (error)
			goto cleanup;

		/* copy from kernel VM space to user space */
		error = copyout(PTRIN(buffer + file_offset),
		    (void *)vmaddr, a_out->a_text + a_out->a_data);

		/* release temporary kernel space */
		vm_map_remove(kernel_map, buffer, buffer +
		    round_page(a_out->a_text + a_out->a_data + file_offset));

		if (error)
			goto cleanup;
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0, td);

	/* Release the kernel mapping. */
	if (a_out)
		vm_map_remove(kernel_map, (vm_offset_t)a_out,
		    (vm_offset_t)a_out + PAGE_SIZE);

	return error;
}

#endif	/* __i386__ || __alpha__ */

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%ld/%ld)"),
			    utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error) {
		/*
		 * See fs/select.c in the Linux kernel. Without this,
		 * Maelstrom doesn't work.
		 */
		if (error == ERESTART)
			error = EINTR;
		goto select_out;
	}

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%ld/%ld)"),
			    utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return error;
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif
	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return ENOMEM;
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr =
		    (caddr_t)((uintptr_t)args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return error;
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
	bsd_args.len = (uintptr_t)args->len;
	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

	return msync(td, &bsd_args);
}

#ifndef __alpha__
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return error;
	td->td_retval[0] = tm;
	return 0;
}
#endif /*!__alpha__*/

struct l_times_argv {
	l_long	tms_utime;
	l_long	tms_stime;
	l_long	tms_cutime;
	l_long	tms_cstime;
};

#ifdef __alpha__
#define CLK_TCK 1024	/* Linux uses 1024 on alpha */
#else
#define CLK_TCK 100	/* Linux uses 100 */
#endif

#define CONVTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	p = td->td_proc;
	PROC_LOCK(p);
	calcru(p, &utime, &stime);
	calccru(p, &cutime, &cstime);
	PROC_UNLOCK(p);

	tms.tms_utime = CONVTCK(utime);
	tms.tms_stime = CONVTCK(stime);

	tms.tms_cutime = CONVTCK(cutime);
	tms.tms_cstime = CONVTCK(cstime);

	if ((error = copyout(&tms, args->buf, sizeof(tms))))
		return error;

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return 0;
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
#ifdef __i386__
	{
		const char *class;
		switch (cpu_class) {
		case CPUCLASS_686:
			class = "i686";
			break;
		case CPUCLASS_586:
			class = "i586";
			break;
		case CPUCLASS_486:
			class = "i486";
			break;
		default:
			class = "i386";
		}
		strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME);
	}
#else
	strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME);
#endif
	strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

#if defined(__i386__) || (defined(__amd64__) && COMPAT_LINUX32)
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

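/*
 * Linux utime(2): convert the l_utimbuf access/modification times into
 * the timeval pair expected by kern_utimes(); a NULL times pointer sets
 * both timestamps to the current time.
 */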
int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return error;
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif	/* __i386__ || (__amd64__ && COMPAT_LINUX32) */

#define __WCLONE 0x80000000

#ifndef __alpha__
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	int error, options, tmpstat;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	error = kern_wait(td, args->pid, &tmpstat, options, NULL);
	if (error)
		return error;

	if (args->status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		return copyout(&tmpstat, args->status, sizeof(int));
	}

	return 0;
}
#endif /*!__alpha__*/

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options, tmpstat;
	struct rusage ru, *rup;
	struct proc *p;

#ifdef DEBUG
	if (ldebug(wait4))
		printf(ARGS(wait4, "%d, %p, %d, %p"),
		    args->pid, (void *)args->status, args->options,
		    (void *)args->rusage);
#endif

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	if (args->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = kern_wait(td, args->pid, &tmpstat, options, rup);
	if (error)
		return error;

	p = td->td_proc;
	PROC_LOCK(p);
	SIGDELSET(p->p_siglist, SIGCHLD);
	PROC_UNLOCK(p);

	if (args->status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		error = copyout(&tmpstat, args->status, sizeof(int));
	}
	if (args->rusage != NULL && error == 0)
		error = copyout(&ru, args->rusage, sizeof(ru));

	return (error);
}

int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	if (args->mode & S_IFIFO)
		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
	else
		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
		    args->dev);
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
#ifndef __alpha__
	if (args->per != 0)
		return EINVAL;
#endif

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return 0;
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	caddr_t sg;
	struct l_itimerval *lp, *lop, ls;
	struct itimerval *p = NULL, *op = NULL, s;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif
	lp = uap->itv;
	if (lp != NULL) {
		sg = stackgap_init();
		p = stackgap_alloc(&sg, sizeof(struct itimerval));
		uap->itv = (struct l_itimerval *)p;
		error = copyin(lp, &ls, sizeof(ls));
		if (error != 0)
			return (error);
		s.it_interval.tv_sec = ls.it_interval.tv_sec;
		s.it_interval.tv_usec = ls.it_interval.tv_usec;
		s.it_value.tv_sec = ls.it_value.tv_sec;
		s.it_value.tv_usec = ls.it_value.tv_usec;
		error = copyout(&s, p, sizeof(s));
		if (error != 0)
			return (error);
#ifdef DEBUG
		if (ldebug(setitimer)) {
			printf("setitimer: value: sec: %ld, usec: %ld\n",
			    s.it_value.tv_sec, s.it_value.tv_usec);
			printf("setitimer: interval: sec: %ld, usec: %ld\n",
			    s.it_interval.tv_sec, s.it_interval.tv_usec);
		}
#endif
	}
	lop = uap->oitv;
	if (lop != NULL) {
		sg = stackgap_init();
		op = stackgap_alloc(&sg, sizeof(struct itimerval));
		uap->oitv = (struct l_itimerval *)op;
	}
	error = setitimer(td, (struct setitimer_args *)uap);
	if (error != 0)
		return (error);
	if (lop != NULL) {
		error = copyin(op, &s, sizeof(s));
		if (error != 0)
			return (error);
		ls.it_interval.tv_sec = s.it_interval.tv_sec;
		ls.it_interval.tv_usec = s.it_interval.tv_usec;
		ls.it_value.tv_sec = s.it_value.tv_sec;
		ls.it_value.tv_usec = s.it_value.tv_usec;
		error = copyout(&ls, lop, sizeof(ls));
	}
	return (error);
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	caddr_t sg;
	struct l_itimerval *lp, ls;
	struct itimerval *p = NULL, s;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	lp = uap->itv;
	if (lp != NULL) {
		sg = stackgap_init();
		p = stackgap_alloc(&sg, sizeof(struct itimerval));
		uap->itv = (struct l_itimerval *)p;
	}
	error = getitimer(td, (struct getitimer_args *)uap);
	if (error != 0)
		return (error);
	if (lp != NULL) {
		error = copyin(p, &s, sizeof(s));
		if (error != 0)
			return (error);
		ls.it_interval.tv_sec = s.it_interval.tv_sec;
		ls.it_interval.tv_usec = s.it_interval.tv_usec;
		ls.it_value.tv_sec = s.it_value.tv_sec;
		ls.it_value.tv_usec = s.it_value.tv_usec;
		error = copyout(&ls, lp, sizeof(ls));
	}
	return (error);
}

#ifndef __alpha__
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return setpriority(td, &bsd_args);
}
#endif /*!__alpha__*/

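/*
 * Linux setgroups(2): install the caller-supplied supplementary group
 * list.  cr_groups[0] (the effective gid) is preserved; see the comment
 * in the function body.
 */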
int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t linux_gidset[NGROUPS];
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= NGROUPS)
		return (EINVAL);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		return (error);
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = suser_cred(oldcred, SUSER_ALLOWJAIL)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		return (error);
	}

	crcopy(newcred, oldcred);
	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	}
	else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	return (0);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t linux_gidset[NGROUPS];
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */
	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	if ((error = copyout(linux_gidset, args->grouplist,
	    ngrp * sizeof(l_gid_t))))
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

#ifndef __alpha__
int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#if !COMPAT_LINUX32
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#else
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /*!__alpha__*/

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_setscheduler_args bsd;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif
	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return sched_setscheduler(td, &bsd);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return error;
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}
	return sched_get_priority_max(td, &bsd);
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}
	return sched_get_priority_min(td, &bsd);
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif
	if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF)
		return (0);
	bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0;
	return (reboot(td, &bsd_args));
}

#ifndef __alpha__

/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined.  This clobbers registers
 * that are assumed to be preserved.  The following lightweight syscalls
 * fix this.  See also linux_getgid16() and linux_getuid16() in
 * linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

#endif /*!__alpha__*/

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

	bsd.pid = args->pid;
	return getsid(td, &bsd);
}